-rw-r--r--  .clang-format | 1
-rw-r--r--  .github/workflows/libcxx-build-and-test.yaml | 2
-rw-r--r--  bolt/include/bolt/Core/BinaryContext.h | 6
-rw-r--r--  bolt/include/bolt/Rewrite/MetadataRewriters.h | 2
-rw-r--r--  bolt/include/bolt/Rewrite/RewriteInstance.h | 5
-rw-r--r--  bolt/lib/Core/BinaryContext.cpp | 2
-rw-r--r--  bolt/lib/Core/Relocation.cpp | 15
-rw-r--r--  bolt/lib/Passes/PAuthGadgetScanner.cpp | 172
-rw-r--r--  bolt/lib/Rewrite/CMakeLists.txt | 1
-rw-r--r--  bolt/lib/Rewrite/DWARFRewriter.cpp | 2
-rw-r--r--  bolt/lib/Rewrite/GNUPropertyRewriter.cpp | 147
-rw-r--r--  bolt/lib/Rewrite/RewriteInstance.cpp | 16
-rw-r--r--  bolt/test/AArch64/Inputs/property-note-bti.yaml | 50
-rw-r--r--  bolt/test/AArch64/Inputs/property-note-nobti.yaml | 50
-rw-r--r--  bolt/test/AArch64/bti-note.test | 10
-rw-r--r--  bolt/test/AArch64/no-bti-note.test | 10
-rw-r--r--  bolt/test/AArch64/tls-desc-call.s | 35
-rw-r--r--  bolt/test/binary-analysis/AArch64/cmdline-args.test | 1
-rw-r--r--  bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s | 6
-rw-r--r--  bolt/test/binary-analysis/AArch64/gs-pauth-calls.s | 5
-rw-r--r--  bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s | 185
-rw-r--r--  bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s | 54
-rw-r--r--  bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s | 184
-rw-r--r--  clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp | 2
-rw-r--r--  clang-tools-extra/clang-include-fixer/IncludeFixer.cpp | 2
-rw-r--r--  clang-tools-extra/clang-move/Move.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/ConfigCompile.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/SystemIncludeExtractor.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/index/SymbolCollector.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/tool/ClangdMain.cpp | 2
-rw-r--r--  clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp | 2
-rw-r--r--  clang-tools-extra/include-cleaner/unittests/RecordTest.cpp | 7
-rw-r--r--  clang/.clang-format | 1
-rw-r--r--  clang/CMakeLists.txt | 15
-rw-r--r--  clang/cmake/caches/BOLT-CSSPGO.cmake | 3
-rw-r--r--  clang/cmake/caches/BOLT-PGO.cmake | 3
-rw-r--r--  clang/cmake/caches/CSSPGO.cmake | 2
-rw-r--r--  clang/docs/InternalsManual.rst | 61
-rw-r--r--  clang/docs/ReleaseNotes.rst | 8
-rw-r--r--  clang/include/clang/AST/ASTConcept.h | 33
-rw-r--r--  clang/include/clang/AST/ASTContext.h | 1
-rw-r--r--  clang/include/clang/AST/CharUnits.h | 6
-rw-r--r--  clang/include/clang/AST/Decl.h | 3
-rw-r--r--  clang/include/clang/AST/ExprCXX.h | 8
-rw-r--r--  clang/include/clang/AST/HLSLResource.h | 3
-rw-r--r--  clang/include/clang/AST/OpenACCClause.h | 21
-rw-r--r--  clang/include/clang/AST/TypeBase.h | 17
-rw-r--r--  clang/include/clang/AST/TypeProperties.td | 5
-rw-r--r--  clang/include/clang/Analysis/CFG.h | 1
-rw-r--r--  clang/include/clang/Basic/Attr.td | 6
-rw-r--r--  clang/include/clang/Basic/LangOptions.h | 3
-rw-r--r--  clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 7
-rw-r--r--  clang/include/clang/CIR/Dialect/IR/CIROps.td | 38
-rw-r--r--  clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td | 6
-rw-r--r--  clang/include/clang/CIR/MissingFeatures.h | 2
-rw-r--r--  clang/include/clang/Driver/CommonArgs.h | 5
-rw-r--r--  clang/include/clang/Driver/Options.td | 9
-rw-r--r--  clang/include/clang/Frontend/CompilerInstance.h | 6
-rw-r--r--  clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def | 8
-rw-r--r--  clang/include/clang/Parse/ParseHLSLRootSignature.h | 3
-rw-r--r--  clang/include/clang/Sema/Sema.h | 111
-rw-r--r--  clang/include/clang/Sema/SemaConcept.h | 434
-rw-r--r--  clang/include/clang/Sema/Template.h | 22
-rw-r--r--  clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h | 2
-rw-r--r--  clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h | 1
-rw-r--r--  clang/lib/AST/APValue.cpp | 2
-rw-r--r--  clang/lib/AST/ASTConcept.cpp | 31
-rw-r--r--  clang/lib/AST/ASTImporter.cpp | 12
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp | 12
-rw-r--r--  clang/lib/AST/Decl.cpp | 47
-rw-r--r--  clang/lib/AST/ItaniumMangle.cpp | 2
-rw-r--r--  clang/lib/AST/RecordLayoutBuilder.cpp | 9
-rw-r--r--  clang/lib/AST/StmtProfile.cpp | 8
-rw-r--r--  clang/lib/AST/TextNodeDumper.cpp | 3
-rw-r--r--  clang/lib/AST/TypePrinter.cpp | 3
-rw-r--r--  clang/lib/Analysis/CFG.cpp | 7
-rw-r--r--  clang/lib/Analysis/ThreadSafety.cpp | 36
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 22
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 64
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 28
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp | 37
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp | 168
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h | 28
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp | 23
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 11
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp | 56
-rw-r--r--  clang/lib/CodeGen/CGExprConstant.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGObjCMac.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 12
-rw-r--r--  clang/lib/CodeGen/Targets/SPIR.cpp | 6
-rw-r--r--  clang/lib/CodeGen/Targets/X86.cpp | 5
-rw-r--r--  clang/lib/Driver/ToolChains/Clang.cpp | 90
-rw-r--r--  clang/lib/Driver/ToolChains/CommonArgs.cpp | 48
-rw-r--r--  clang/lib/Driver/ToolChains/Flang.cpp | 21
-rw-r--r--  clang/lib/Driver/ToolChains/HLSL.cpp | 2
-rw-r--r--  clang/lib/ExtractAPI/ExtractAPIConsumer.cpp | 3
-rw-r--r--  clang/lib/Format/ContinuationIndenter.cpp | 50
-rw-r--r--  clang/lib/Format/Format.cpp | 2
-rw-r--r--  clang/lib/Format/FormatToken.cpp | 4
-rw-r--r--  clang/lib/Format/FormatToken.h | 5
-rw-r--r--  clang/lib/Format/FormatTokenLexer.cpp | 4
-rw-r--r--  clang/lib/Format/MacroExpander.cpp | 2
-rw-r--r--  clang/lib/Format/NamespaceEndCommentsFixer.cpp | 4
-rw-r--r--  clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp | 2
-rw-r--r--  clang/lib/Format/QualifierAlignmentFixer.cpp | 2
-rw-r--r--  clang/lib/Format/SortJavaScriptImports.cpp | 4
-rw-r--r--  clang/lib/Format/TokenAnnotator.cpp | 106
-rw-r--r--  clang/lib/Format/UnwrappedLineFormatter.cpp | 6
-rw-r--r--  clang/lib/Format/UnwrappedLineParser.cpp | 42
-rw-r--r--  clang/lib/Format/WhitespaceManager.cpp | 2
-rw-r--r--  clang/lib/Frontend/ChainedIncludesSource.cpp | 2
-rw-r--r--  clang/lib/Frontend/CompilerInstance.cpp | 13
-rw-r--r--  clang/lib/Frontend/FrontendAction.cpp | 11
-rw-r--r--  clang/lib/Lex/HeaderSearch.cpp | 4
-rw-r--r--  clang/lib/Parse/ParseDecl.cpp | 3
-rw-r--r--  clang/lib/Parse/ParseHLSLRootSignature.cpp | 61
-rw-r--r--  clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp | 110
-rw-r--r--  clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h | 16
-rw-r--r--  clang/lib/Sema/HLSLExternalSemaSource.cpp | 26
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 25
-rw-r--r--  clang/lib/Sema/SemaConcept.cpp | 2000
-rw-r--r--  clang/lib/Sema/SemaDeclCXX.cpp | 18
-rw-r--r--  clang/lib/Sema/SemaExpr.cpp | 7
-rw-r--r--  clang/lib/Sema/SemaExprCXX.cpp | 20
-rw-r--r--  clang/lib/Sema/SemaHLSL.cpp | 11
-rw-r--r--  clang/lib/Sema/SemaInit.cpp | 5
-rw-r--r--  clang/lib/Sema/SemaOpenACC.cpp | 30
-rw-r--r--  clang/lib/Sema/SemaOverload.cpp | 6
-rw-r--r--  clang/lib/Sema/SemaTemplate.cpp | 93
-rw-r--r--  clang/lib/Sema/SemaTemplateDeduction.cpp | 51
-rw-r--r--  clang/lib/Sema/SemaTemplateDeductionGuide.cpp | 48
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiate.cpp | 168
-rw-r--r--  clang/lib/Sema/TreeTransform.h | 19
-rw-r--r--  clang/lib/Serialization/ASTReader.cpp | 15
-rw-r--r--  clang/lib/Serialization/ASTReaderDecl.cpp | 2
-rw-r--r--  clang/lib/Serialization/ASTReaderStmt.cpp | 14
-rw-r--r--  clang/lib/Serialization/ASTWriter.cpp | 9
-rw-r--r--  clang/lib/Serialization/ASTWriterStmt.cpp | 18
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Core/CallEvent.cpp | 31
-rw-r--r--  clang/lib/StaticAnalyzer/Core/RegionStore.cpp | 10
-rw-r--r--  clang/lib/StaticAnalyzer/Core/Store.cpp | 2
-rw-r--r--  clang/lib/Testing/TestAST.cpp | 2
-rw-r--r--  clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp | 13
-rw-r--r--  clang/lib/Tooling/Tooling.cpp | 2
-rw-r--r--  clang/test/AST/ByteCode/const-eval.c | 2
-rw-r--r--  clang/test/AST/ByteCode/literals.cpp | 2
-rw-r--r--  clang/test/AST/HLSL/RootSignature-Target-AST.hlsl | 12
-rw-r--r--  clang/test/AST/HLSL/RootSignatures-AST.hlsl | 27
-rw-r--r--  clang/test/AST/HLSL/StructuredBuffers-AST.hlsl | 23
-rw-r--r--  clang/test/AST/ast-dump-concepts.cpp | 10
-rw-r--r--  clang/test/AST/ast-dump-ctad-alias.cpp | 21
-rw-r--r--  clang/test/Analysis/Checkers/WebKit/objc-mock-types.h | 36
-rw-r--r--  clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm | 12
-rw-r--r--  clang/test/Analysis/initializer.cpp | 48
-rw-r--r--  clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c | 10
-rw-r--r--  clang/test/CIR/CodeGen/array-ctor.cpp | 6
-rw-r--r--  clang/test/CIR/CodeGen/array-dtor.cpp | 8
-rw-r--r--  clang/test/CIR/CodeGen/array.cpp | 32
-rw-r--r--  clang/test/CIR/CodeGen/assign-operator.cpp | 6
-rw-r--r--  clang/test/CIR/CodeGen/basic.c | 2
-rw-r--r--  clang/test/CIR/CodeGen/basic.cpp | 2
-rw-r--r--  clang/test/CIR/CodeGen/binassign.c | 2
-rw-r--r--  clang/test/CIR/CodeGen/binop.c | 6
-rw-r--r--  clang/test/CIR/CodeGen/binop.cpp | 12
-rw-r--r--  clang/test/CIR/CodeGen/builtin_bit.cpp | 38
-rw-r--r--  clang/test/CIR/CodeGen/builtin_call.cpp | 8
-rw-r--r--  clang/test/CIR/CodeGen/builtin_printf.cpp | 4
-rw-r--r--  clang/test/CIR/CodeGen/cast.cpp | 30
-rw-r--r--  clang/test/CIR/CodeGen/cmp.cpp | 4
-rw-r--r--  clang/test/CIR/CodeGen/comma.c | 2
-rw-r--r--  clang/test/CIR/CodeGen/complex-cast.cpp | 80
-rw-r--r--  clang/test/CIR/CodeGen/complex-compound-assignment.cpp | 28
-rw-r--r--  clang/test/CIR/CodeGen/complex-mul-div.cpp | 24
-rw-r--r--  clang/test/CIR/CodeGen/complex-unary.cpp | 24
-rw-r--r--  clang/test/CIR/CodeGen/complex.cpp | 46
-rw-r--r--  clang/test/CIR/CodeGen/cxx-default-init.cpp | 6
-rw-r--r--  clang/test/CIR/CodeGen/delegating-ctor.cpp | 12
-rw-r--r--  clang/test/CIR/CodeGen/delete.cpp | 4
-rw-r--r--  clang/test/CIR/CodeGen/destructors.cpp | 4
-rw-r--r--  clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp | 16
-rw-r--r--  clang/test/CIR/CodeGen/if.cpp | 6
-rw-r--r--  clang/test/CIR/CodeGen/int-to-bool.cpp | 8
-rw-r--r--  clang/test/CIR/CodeGen/loop.cpp | 12
-rw-r--r--  clang/test/CIR/CodeGen/new.cpp | 16
-rw-r--r--  clang/test/CIR/CodeGen/no-prototype.c | 6
-rw-r--r--  clang/test/CIR/CodeGen/opaque.c | 4
-rw-r--r--  clang/test/CIR/CodeGen/opaque.cpp | 6
-rw-r--r--  clang/test/CIR/CodeGen/pointers.cpp | 2
-rw-r--r--  clang/test/CIR/CodeGen/struct.cpp | 29
-rw-r--r--  clang/test/CIR/CodeGen/ternary.cpp | 2
-rw-r--r--  clang/test/CIR/CodeGen/unary.cpp | 32
-rw-r--r--  clang/test/CIR/CodeGen/union.c | 10
-rw-r--r--  clang/test/CIR/CodeGen/var_arg.c | 12
-rw-r--r--  clang/test/CIR/CodeGen/variable-decomposition.cpp | 2
-rw-r--r--  clang/test/CIR/CodeGen/vbase.cpp | 10
-rw-r--r--  clang/test/CIR/CodeGen/vector-ext.cpp | 8
-rw-r--r--  clang/test/CIR/CodeGen/vector.cpp | 8
-rw-r--r--  clang/test/CIR/CodeGen/vtt.cpp | 38
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined-copy.c | 2
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp | 74
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp | 34
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp | 36
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp | 36
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/combined.cpp | 6
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c | 36
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp | 74
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-private-clause.c | 3
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp | 34
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp | 36
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp | 36
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/data.c | 4
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/host_data.c | 4
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/init.c | 6
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/kernels.c | 10
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp | 34
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp | 36
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp | 18
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp | 36
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/parallel.c | 10
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp | 261
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp | 234
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-int.cpp | 4
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp | 1040
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp | 1025
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp | 500
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp | 425
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp | 429
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp | 274
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/serial.c | 10
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/set.c | 4
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/shutdown.c | 6
-rw-r--r--  clang/test/CIR/CodeGenOpenACC/wait.c | 6
-rw-r--r--  clang/test/CIR/IR/alloca.cir | 6
-rw-r--r--  clang/test/CIR/IR/array-ctor.cir | 2
-rw-r--r--  clang/test/CIR/IR/array-dtor.cir | 2
-rw-r--r--  clang/test/CIR/IR/array.cir | 2
-rw-r--r--  clang/test/CIR/IR/atomic.cir | 2
-rw-r--r--  clang/test/CIR/IR/binassign.cir | 6
-rw-r--r--  clang/test/CIR/IR/bitfield_info.cir | 2
-rw-r--r--  clang/test/CIR/IR/call.cir | 2
-rw-r--r--  clang/test/CIR/IR/cast.cir | 10
-rw-r--r--  clang/test/CIR/IR/cmp.cir | 50
-rw-r--r--  clang/test/CIR/IR/complex.cir | 2
-rw-r--r--  clang/test/CIR/IR/copy.cir | 2
-rw-r--r--  clang/test/CIR/IR/func.cir | 2
-rw-r--r--  clang/test/CIR/IR/global-init.cir | 2
-rw-r--r--  clang/test/CIR/IR/global-var-linkage.cir | 3
-rw-r--r--  clang/test/CIR/IR/global.cir | 2
-rw-r--r--  clang/test/CIR/IR/label.cir | 2
-rw-r--r--  clang/test/CIR/IR/module.cir | 3
-rw-r--r--  clang/test/CIR/IR/stack-save-restore.cir | 2
-rw-r--r--  clang/test/CIR/IR/struct.cir | 2
-rw-r--r--  clang/test/CIR/IR/switch-flat.cir | 2
-rw-r--r--  clang/test/CIR/IR/switch.cir | 2
-rw-r--r--  clang/test/CIR/IR/ternary.cir | 2
-rw-r--r--  clang/test/CIR/IR/throw.cir | 2
-rw-r--r--  clang/test/CIR/IR/unary.cir | 2
-rw-r--r--  clang/test/CIR/IR/vector.cir | 2
-rw-r--r--  clang/test/CIR/IR/vtable-addrpt.cir | 4
-rw-r--r--  clang/test/CIR/IR/vtable-attr.cir | 2
-rw-r--r--  clang/test/CIR/IR/vtt-addrpoint.cir | 4
-rw-r--r--  clang/test/CIR/Lowering/cast.cir | 34
-rw-r--r--  clang/test/CIR/Lowering/if.cir | 8
-rw-r--r--  clang/test/CIR/Lowering/vtt-addrpoint.cir | 2
-rw-r--r--  clang/test/CIR/Transforms/canonicalize.cir | 28
-rw-r--r--  clang/test/CIR/Transforms/if.cir | 8
-rw-r--r--  clang/test/CIR/Transforms/switch.cir | 8
-rw-r--r--  clang/test/CXX/drs/cwg25xx.cpp | 14
-rw-r--r--  clang/test/CXX/expr/expr.prim/expr.prim.id/p3.cpp | 3
-rw-r--r--  clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp | 14
-rw-r--r--  clang/test/CXX/expr/expr.prim/expr.prim.req/nested-requirement.cpp | 35
-rw-r--r--  clang/test/CXX/expr/expr.prim/expr.prim.req/simple-requirement.cpp | 4
-rw-r--r--  clang/test/CXX/expr/expr.prim/expr.prim.req/type-requirement.cpp | 12
-rw-r--r--  clang/test/CXX/temp/temp.constr/temp.constr.atomic/constrant-satisfaction-conversions.cpp | 5
-rw-r--r--  clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp | 59
-rw-r--r--  clang/test/CXX/temp/temp.param/p10-2a.cpp | 23
-rw-r--r--  clang/test/CodeGen/X86/avx-cxx-record.cpp | 23
-rw-r--r--  clang/test/CodeGen/X86/avx512ifma-builtins.c | 5
-rw-r--r--  clang/test/CodeGen/X86/avx512ifmavl-builtins.c | 6
-rw-r--r--  clang/test/CodeGen/X86/avxifma-builtins.c | 6
-rw-r--r--  clang/test/CodeGenHLSL/RootSignature.hlsl | 5
-rw-r--r--  clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl | 47
-rw-r--r--  clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl | 26
-rw-r--r--  clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl | 4
-rw-r--r--  clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl | 6
-rw-r--r--  clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl | 2
-rw-r--r--  clang/test/CodeGenHLSL/resources/resource-bindings.hlsl | 2
-rw-r--r--  clang/test/Driver/riscv-cpus.c | 3
-rw-r--r--  clang/test/OpenMP/amdgcn_save_temps.c | 23
-rw-r--r--  clang/test/Parser/recovery-after-expected-unqualified-id.cpp | 9
-rw-r--r--  clang/test/Sema/const-eval.c | 2
-rw-r--r--  clang/test/Sema/integer-overflow.c | 2
-rw-r--r--  clang/test/Sema/unbounded-array-bounds.c | 48
-rw-r--r--  clang/test/SemaCXX/array-bounds.cpp | 4
-rw-r--r--  clang/test/SemaCXX/constant-expression-cxx14.cpp | 2
-rw-r--r--  clang/test/SemaCXX/cxx20-ctad-type-alias.cpp | 32
-rw-r--r--  clang/test/SemaCXX/cxx23-assume.cpp | 9
-rw-r--r--  clang/test/SemaCXX/cxx2b-deducing-this.cpp | 8
-rw-r--r--  clang/test/SemaCXX/cxx2c-fold-exprs.cpp | 202
-rw-r--r--  clang/test/SemaCXX/cxx2c-template-template-param.cpp | 4
-rw-r--r--  clang/test/SemaCXX/cxx98-compat.cpp | 19
-rw-r--r--  clang/test/SemaCXX/integer-overflow.cpp | 2
-rw-r--r--  clang/test/SemaCXX/invalid-requirement-requires-expr.cpp | 4
-rw-r--r--  clang/test/SemaCXX/overload-resolution-deferred-templates.cpp | 3
-rw-r--r--  clang/test/SemaCXX/type-traits.cpp | 4
-rw-r--r--  clang/test/SemaHLSL/BuiltIns/Buffers.hlsl | 6
-rw-r--r--  clang/test/SemaHLSL/BuiltIns/RWBuffers.hlsl | 6
-rw-r--r--  clang/test/SemaHLSL/RootSignature-err.hlsl | 4
-rw-r--r--  clang/test/SemaHLSL/RootSignature-flags-err.hlsl | 24
-rw-r--r--  clang/test/SemaTemplate/GH161657.cpp | 11
-rw-r--r--  clang/test/SemaTemplate/concepts-recovery-expr.cpp | 32
-rw-r--r--  clang/test/SemaTemplate/concepts-recursive-inst.cpp | 27
-rw-r--r--  clang/test/SemaTemplate/concepts.cpp | 71
-rw-r--r--  clang/test/SemaTemplate/deduction-guide.cpp | 15
-rw-r--r--  clang/test/SemaTemplate/instantiate-abbreviated-template.cpp | 1
-rw-r--r--  clang/test/SemaTemplate/instantiate-expanded-type-constraint.cpp | 4
-rw-r--r--  clang/test/SemaTemplate/instantiate-requires-expr.cpp | 20
-rw-r--r--  clang/test/SemaTemplate/instantiate-template-argument.cpp | 97
-rw-r--r--  clang/test/SemaTemplate/pr52970.cpp | 2
-rw-r--r--  clang/tools/clang-import-test/clang-import-test.cpp | 2
-rw-r--r--  clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp | 8
-rw-r--r--  clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp | 15
-rw-r--r--  clang/tools/libclang/CIndex.cpp | 9
-rw-r--r--  clang/unittests/Analysis/CFGTest.cpp | 153
-rw-r--r--  clang/unittests/CodeGen/TestCompiler.h | 2
-rw-r--r--  clang/unittests/Frontend/CompilerInstanceTest.cpp | 2
-rw-r--r--  clang/unittests/Lex/LexHLSLRootSignatureTest.cpp | 3
-rw-r--r--  clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp | 34
-rw-r--r--  clang/unittests/Serialization/ForceCheckFileInputTest.cpp | 4
-rw-r--r--  clang/unittests/StaticAnalyzer/CallEventTest.cpp | 41
-rw-r--r--  clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp | 2
-rw-r--r--  clang/utils/TableGen/RISCVVEmitter.cpp | 8
-rw-r--r--  clang/utils/perf-training/CMakeLists.txt | 36
-rw-r--r--  clang/utils/perf-training/perf-helper.py | 65
-rw-r--r--  compiler-rt/lib/builtins/CMakeLists.txt | 11
-rw-r--r--  compiler-rt/lib/builtins/cpu_model/aarch64.c | 6
-rw-r--r--  compiler-rt/lib/builtins/cpu_model/aarch64/fmv/hwcap.inc (renamed from compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc) | 3
-rw-r--r--  compiler-rt/lib/builtins/wasm/__c_longjmp.S | 26
-rw-r--r--  compiler-rt/lib/builtins/wasm/__cpp_exception.S | 26
-rw-r--r--  compiler-rt/test/asan/TestCases/wcscat.cpp | 20
-rw-r--r--  compiler-rt/test/asan/TestCases/wcscpy.cpp | 20
-rw-r--r--  compiler-rt/test/asan/TestCases/wcsncat.cpp | 20
-rw-r--r--  compiler-rt/test/asan/TestCases/wcsncpy.cpp | 20
-rw-r--r--  flang-rt/lib/runtime/character.cpp | 4
-rw-r--r--  flang-rt/lib/runtime/derived-api.cpp | 20
-rw-r--r--  flang/docs/ComplexOperations.md | 4
-rw-r--r--  flang/docs/FlangDriver.md | 3
-rw-r--r--  flang/include/flang/Lower/OpenACC.h | 3
-rw-r--r--  flang/include/flang/Lower/SymbolMap.h | 4
-rw-r--r--  flang/include/flang/Optimizer/Builder/FIRBuilder.h | 9
-rw-r--r--  flang/include/flang/Semantics/openmp-utils.h | 2
-rw-r--r--  flang/include/flang/Semantics/symbol.h | 2
-rw-r--r--  flang/include/flang/Support/LangOptions.def | 3
-rw-r--r--  flang/lib/Frontend/CompilerInvocation.cpp | 3
-rw-r--r--  flang/lib/Frontend/FrontendActions.cpp | 8
-rw-r--r--  flang/lib/Lower/Bridge.cpp | 2
-rw-r--r--  flang/lib/Lower/OpenACC.cpp | 34
-rw-r--r--  flang/lib/Lower/SymbolMap.cpp | 10
-rw-r--r--  flang/lib/Optimizer/Builder/FIRBuilder.cpp | 24
-rw-r--r--  flang/lib/Optimizer/Builder/IntrinsicCall.cpp | 38
-rw-r--r--  flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp | 294
-rw-r--r--  flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp | 7
-rw-r--r--  flang/lib/Parser/parsing.cpp | 1
-rw-r--r--  flang/lib/Parser/prescan.cpp | 5
-rw-r--r--  flang/lib/Semantics/check-declarations.cpp | 3
-rw-r--r--  flang/lib/Semantics/check-omp-structure.cpp | 12
-rw-r--r--  flang/lib/Semantics/openmp-utils.cpp | 18
-rw-r--r--  flang/lib/Semantics/resolve-directives.cpp | 40
-rw-r--r--  flang/lib/Semantics/resolve-names.cpp | 65
-rw-r--r--  flang/test/Driver/complex-range.f90 | 77
-rw-r--r--  flang/test/Driver/fast-real-mod.f90 | 7
-rw-r--r--  flang/test/Integration/debug-complex-1.f90 | 4
-rw-r--r--  flang/test/Integration/debug-local-var-2.f90 | 6
-rw-r--r--  flang/test/Lower/Intrinsics/fast-real-mod.f90 | 83
-rw-r--r--  flang/test/Lower/OpenACC/acc-host-data-cuda-device.f90 | 43
-rw-r--r--  flang/test/Lower/OpenMP/declare-mapper.f90 | 39
-rw-r--r--  flang/test/Lower/OpenMP/wsloop-collapse-continue.f90 | 19
-rw-r--r--  flang/test/Semantics/OpenACC/acc-sentinel.f90 | 14
-rw-r--r--  flang/test/Semantics/OpenMP/declare-simd.f90 | 5
-rw-r--r--  flang/test/Semantics/OpenMP/do08.f90 | 1
-rw-r--r--  flang/test/Semantics/OpenMP/do13.f90 | 1
-rw-r--r--  flang/test/Transforms/debug-complex-1.fir | 4
-rw-r--r--  flang/test/Transforms/debug-derived-type-1.fir | 6
-rw-r--r--  flang/test/Transforms/debug-fn-info.fir | 6
-rw-r--r--  flang/test/Transforms/debug-local-var.fir | 6
-rw-r--r--  flang/test/Transforms/debug-ref-type.fir | 2
-rw-r--r--  flang/test/Transforms/debug-tuple-type.fir | 2
-rw-r--r--  flang/test/Transforms/debug-vector-type.fir | 12
-rw-r--r--  flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp | 84
-rw-r--r--  libc/config/linux/aarch64/entrypoints.txt | 2
-rw-r--r--  libc/config/linux/x86_64/entrypoints.txt | 1
-rw-r--r--  libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp | 4
-rw-r--r--  libc/include/llvm-libc-macros/linux/fcntl-macros.h | 3
-rw-r--r--  libc/include/sys/syscall.h.def | 4
-rw-r--r--  libc/include/unistd.yaml | 9
-rw-r--r--  libc/shared/math.h | 1
-rw-r--r--  libc/shared/math/exp10m1f16.h | 29
-rw-r--r--  libc/src/__support/macros/attributes.h | 10
-rw-r--r--  libc/src/__support/math/CMakeLists.txt | 17
-rw-r--r--  libc/src/__support/math/exp10m1f16.h | 185
-rw-r--r--  libc/src/math/generic/CMakeLists.txt | 13
-rw-r--r--  libc/src/math/generic/exp10m1f16.cpp | 158
-rw-r--r--  libc/src/string/CMakeLists.txt | 1
-rw-r--r--  libc/src/string/memory_utils/aarch64/inline_strlen.h | 2
-rw-r--r--  libc/src/string/memory_utils/generic/inline_strlen.h | 3
-rw-r--r--  libc/src/string/memory_utils/x86_64/inline_strlen.h | 4
-rw-r--r--  libc/src/string/string_utils.h | 3
-rw-r--r--  libc/src/unistd/CMakeLists.txt | 7
-rw-r--r--  libc/src/unistd/faccessat.h | 20
-rw-r--r--  libc/src/unistd/linux/CMakeLists.txt | 13
-rw-r--r--  libc/src/unistd/linux/access.cpp | 2
-rw-r--r--  libc/src/unistd/linux/faccessat.cpp | 37
-rw-r--r--  libc/test/shared/CMakeLists.txt | 1
-rw-r--r--  libc/test/shared/shared_math_test.cpp | 1
-rw-r--r--  libc/test/src/unistd/CMakeLists.txt | 17
-rw-r--r--  libc/test/src/unistd/faccessat_test.cpp | 115
-rw-r--r--  libcxx/docs/index.rst | 2
-rw-r--r--  libcxx/include/CMakeLists.txt | 2
-rw-r--r--  libcxx/include/__algorithm/comp.h | 4
-rw-r--r--  libcxx/include/__algorithm/find.h | 2
-rw-r--r--  libcxx/include/__functional/is_transparent.h | 8
-rw-r--r--  libcxx/include/__functional/operations.h | 18
-rw-r--r--  libcxx/include/__functional/ranges_operations.h | 7
-rw-r--r--  libcxx/include/__tree | 4
-rw-r--r--  libcxx/include/__type_traits/is_generic_transparent_comparator.h | 30
-rw-r--r--  libcxx/include/__type_traits/make_transparent.h | 48
-rw-r--r--  libcxx/include/map | 54
-rw-r--r--  libcxx/include/module.modulemap.in | 2
-rw-r--r--  libcxx/include/string | 16
-rw-r--r--  libcxx/test/benchmarks/containers/associative/map.bench.cpp | 13
-rw-r--r--  libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp | 13
-rw-r--r--  libcxx/test/libcxx/algorithms/cpp17_iterator_concepts.verify.cpp | 4
-rw-r--r--  libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp | 3
-rw-r--r--  libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm | 3
-rwxr-xr-x  libcxx/utils/find-rerun-candidates | 242
-rw-r--r--  libcxx/utils/libcxx/test/params.py | 3
-rwxr-xr-x  libcxx/utils/visualize-historical | 17
-rw-r--r--  libunwind/test/configs/cmake-bridge.cfg.in | 11
-rw-r--r--  libunwind/test/eh_frame_fde_pc_range.pass.cpp | 7
-rw-r--r--  lld/COFF/Driver.cpp | 10
-rw-r--r--  lld/COFF/Options.td | 4
-rw-r--r--  lld/ELF/Driver.cpp | 5
-rw-r--r--  lld/ELF/Options.td | 6
-rw-r--r--  lldb/include/lldb/Core/Mangled.h | 10
-rw-r--r--  lldb/include/lldb/Target/Statistics.h | 2
-rw-r--r--  lldb/packages/Python/lldbsuite/test/cpu_feature.py | 2
-rw-r--r--  lldb/source/API/SBTarget.cpp | 1
-rw-r--r--  lldb/source/Commands/CommandObjectTarget.cpp | 6
-rw-r--r--  lldb/source/Core/Mangled.cpp | 2
-rw-r--r--  lldb/source/Expression/IRExecutionUnit.cpp | 7
-rw-r--r--  lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp | 2
-rw-r--r--  lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp | 8
-rw-r--r--  lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp | 180
-rw-r--r--  lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp | 8
-rw-r--r--  lldb/source/Target/Statistics.cpp | 5
-rw-r--r--  lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py | 5
-rw-r--r--  lldb/test/API/functionalities/json/symbol-file/Makefile | 1
-rw-r--r--  lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py | 28
-rw-r--r--  lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml | 19
-rw-r--r--  lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py | 33
-rw-r--r--  lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py | 20
-rw-r--r--  lldb/test/API/lang/cpp/floating-types-specialization/Makefile | 3
-rw-r--r--  lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py | 36
-rw-r--r--  lldb/test/API/lang/cpp/floating-types-specialization/main.cpp | 11
-rw-r--r--  lldb/test/API/lang/cpp/function-call-from-object-file/Makefile | 3
-rw-r--r--  lldb/test/API/lang/cpp/function-call-from-object-file/TestFunctionCallFromObjectFile.py | 29
-rw-r--r--  lldb/test/API/lang/cpp/function-call-from-object-file/common.h | 8
-rw-r--r--  lldb/test/API/lang/cpp/function-call-from-object-file/lib1.cpp | 8
-rw-r--r--  lldb/test/API/lang/cpp/function-call-from-object-file/lib2.cpp | 6
-rw-r--r--  lldb/test/API/lang/cpp/function-call-from-object-file/main.cpp | 10
-rw-r--r--  lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py | 31
-rw-r--r--  lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py | 9
-rw-r--r--  lldb/test/API/macosx/mte/Makefile | 12
-rw-r--r--  lldb/test/API/macosx/mte/TestDarwinMTE.py | 110
-rw-r--r--  lldb/test/API/macosx/mte/main.c | 28
-rw-r--r--  lldb/test/API/macosx/mte/mte-entitlements.plist | 10
-rw-r--r--  lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c | 35
-rw-r--r--  lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp | 2
-rw-r--r--  lldb/test/Shell/lit.cfg.py | 2
-rw-r--r--  lldb/tools/debugserver/source/DNB.cpp | 10
-rw-r--r--  lldb/tools/debugserver/source/DNB.h | 3
-rw-r--r--  lldb/tools/debugserver/source/DNBDefs.h | 3
-rw-r--r--  lldb/tools/debugserver/source/MacOSX/MachTask.h | 2
-rw-r--r--  lldb/tools/debugserver/source/MacOSX/MachTask.mm | 17
-rw-r--r--  lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp | 59
-rw-r--r--  lldb/tools/debugserver/source/MacOSX/MachVMMemory.h | 2
-rw-r--r--  lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp | 46
-rw-r--r--  lldb/tools/debugserver/source/MacOSX/MachVMRegion.h | 3
-rw-r--r--  lldb/tools/debugserver/source/RNBRemote.cpp | 105
-rw-r--r--  lldb/tools/debugserver/source/RNBRemote.h | 2
-rw-r--r--  llvm/.clang-format | 2
-rw-r--r--  llvm/CMakeLists.txt | 3
-rw-r--r--  llvm/Maintainers.md | 7
-rw-r--r--  llvm/cmake/modules/HandleLLVMOptions.cmake | 30
-rw-r--r--  llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst | 2002
-rw-r--r--  llvm/docs/AMDGPU/gfx12_addr.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_attr.rst | 28
-rw-r--r--  llvm/docs/AMDGPU/gfx12_clause.rst | 7
-rw-r--r--  llvm/docs/AMDGPU/gfx12_data0_56f215.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_data0_6802ce.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_data0_e016a1.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_data0_fd235e.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_data1_6802ce.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_data1_731030.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_data1_e016a1.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_data1_fd235e.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_delay.rst | 74
-rw-r--r--  llvm/docs/AMDGPU/gfx12_hwreg.rst | 76
-rw-r--r--  llvm/docs/AMDGPU/gfx12_imm16.rst | 7
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ioffset.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_label.rst | 29
-rw-r--r--  llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_literal_81e671.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_m.rst | 13
-rw-r--r--  llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_samp.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sbase_453b95.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdata_354189.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdata_836716.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdst_006c40.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdst_20064d.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdst_354189.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdst_836716.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sendmsg.rst | 48
-rw-r--r--  llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst | 30
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_218bea.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_39b593.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_730a13.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_81e671.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_c98889.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst | 20
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src0_5727cf.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src0_5cae62.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src0_6802ce.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src0_85aab6.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src0_c4593f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src0_e016a1.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src0_fd235e.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src1_5727cf.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src1_5cae62.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src1_6802ce.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src1_731030.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src1_977794.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src1_c4593f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src1_e016a1.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src1_fd235e.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src2_2797bc.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src2_5727cf.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src2_5cae62.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src2_6802ce.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src2_7b936a.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src2_c4593f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_src2_e016a1.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_srcx0.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_srcy0.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_tgt.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst | 15
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vcc.rst | 16
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdata_69a144.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdata_89680f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_006c40.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_227281.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_69a144.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_836716.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_89680f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdstx.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vdsty.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_version.rst | 7
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc0.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc2.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc3.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrcx1.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_vsrcy1.rst | 17
-rw-r--r--  llvm/docs/AMDGPU/gfx12_waitcnt.rst | 55
-rw-r--r--  llvm/docs/AMDGPUModifierSyntax.rst | 109
-rw-r--r--  llvm/docs/AMDGPUOperandSyntax.rst | 11
-rw-r--r--  llvm/docs/AMDGPUUsage.rst | 2
-rw-r--r--  llvm/docs/DirectXUsage.rst | 8
-rw-r--r--  llvm/docs/GettingInvolved.rst | 10
-rw-r--r--  llvm/docs/LangRef.rst | 29
-rw-r--r--  llvm/docs/TableGen/ProgRef.rst | 22
-rw-r--r--  llvm/include/llvm/ADT/BitVector.h | 5
-rw-r--r--  llvm/include/llvm/ADT/ConcurrentHashtable.h | 5
-rw-r--r--  llvm/include/llvm/ADT/DirectedGraph.h | 10
-rw-r--r--  llvm/include/llvm/ADT/IntervalTree.h | 3
-rw-r--r--  llvm/include/llvm/ADT/SmallPtrSet.h | 10
-rw-r--r--  llvm/include/llvm/ADT/SmallVector.h | 33
-rw-r--r--  llvm/include/llvm/Analysis/IR2Vec.h | 272
-rw-r--r--  llvm/include/llvm/Analysis/MemoryProfileInfo.h | 8
-rw-r--r--  llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h | 23
-rw-r--r--  llvm/include/llvm/BinaryFormat/DXContainer.h | 1
-rw-r--r--  llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h | 4
-rw-r--r--  llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h | 7
-rw-r--r--  llvm/include/llvm/CodeGen/MIRYamlMapping.h | 2
-rw-r--r--  llvm/include/llvm/CodeGen/MachineFrameInfo.h | 13
-rw-r--r--  llvm/include/llvm/CodeGen/TargetFrameLowering.h | 1
-rw-r--r--  llvm/include/llvm/CodeGen/TargetLowering.h | 7
-rw-r--r--  llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h | 1
-rw-r--r--  llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h | 7
-rw-r--r--  llvm/include/llvm/IR/FixedMetadataKinds.def | 1
-rw-r--r--  llvm/include/llvm/IR/Instructions.h | 7
-rw-r--r--  llvm/include/llvm/IR/IntrinsicInst.h | 20
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAArch64.td | 30
-rw-r--r--  llvm/include/llvm/IR/Metadata.h | 8
-rw-r--r--  llvm/include/llvm/IR/ProfDataUtils.h | 8
-rw-r--r--  llvm/include/llvm/IR/ValueMap.h | 105
-rw-r--r--  llvm/include/llvm/Object/ELF.h | 6
-rw-r--r--  llvm/include/llvm/Support/FileSystem.h | 12
-rw-r--r--  llvm/include/llvm/Support/Format.h | 14
-rw-r--r--  llvm/include/llvm/Support/FormatProviders.h | 19
-rw-r--r--  llvm/include/llvm/Support/FormatVariadicDetails.h | 3
-rw-r--r--  llvm/include/llvm/Support/HashBuilder.h | 3
-rw-r--r--  llvm/include/llvm/Support/InstructionCost.h | 16
-rw-r--r--  llvm/include/llvm/Support/Path.h | 12
-rw-r--r--  llvm/include/llvm/Support/TargetOpcodes.def | 3
-rw-r--r--  llvm/include/llvm/Target/GenericOpcodes.td | 7
-rw-r--r--  llvm/include/llvm/Target/TargetMachine.h | 4
-rw-r--r--  llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h | 7
-rw-r--r--  llvm/lib/Analysis/CaptureTracking.cpp | 6
-rw-r--r--  llvm/lib/Analysis/CtxProfAnalysis.cpp | 4
-rw-r--r--  llvm/lib/Analysis/IR2Vec.cpp | 353
-rw-r--r--  llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp | 4
-rw-r--r--  llvm/lib/Analysis/InlineAdvisor.cpp | 2
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp | 28
-rw-r--r--  llvm/lib/Analysis/MemoryProfileInfo.cpp | 26
-rw-r--r--  llvm/lib/Analysis/ModuleSummaryAnalysis.cpp | 2
-rw-r--r--  llvm/lib/Analysis/ProfileSummaryInfo.cpp | 4
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 7
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp | 35
-rw-r--r--  llvm/lib/CAS/OnDiskTrieRawHashMap.cpp | 5
-rw-r--r--  llvm/lib/CGData/CodeGenData.cpp | 3
-rw-r--r--  llvm/lib/CGData/CodeGenDataReader.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp | 63
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachineRegionInfo.cpp | 3
-rw-r--r--  llvm/lib/CodeGen/PeepholeOptimizer.cpp | 22
-rw-r--r--  llvm/lib/CodeGen/RegAllocGreedy.cpp | 71
-rw-r--r--  llvm/lib/CodeGen/RegAllocScore.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/RegisterCoalescer.cpp | 17
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 24
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 50
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp | 2
-rw-r--r--  llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp | 2
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/JITLink.cpp | 3
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp | 30
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp | 3
-rw-r--r--  llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp | 9
-rw-r--r--  llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp | 14
-rw-r--r--  llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp | 2
-rw-r--r--  llvm/lib/IR/AsmWriter.cpp | 469
-rw-r--r--  llvm/lib/IR/Instruction.cpp | 4
-rw-r--r--  llvm/lib/IR/Instructions.cpp | 17
-rw-r--r--  llvm/lib/IR/Metadata.cpp | 35
-rw-r--r--  llvm/lib/IR/ProfDataUtils.cpp | 38
-rw-r--r--  llvm/lib/IR/Value.cpp | 2
-rw-r--r--  llvm/lib/IR/Verifier.cpp | 25
-rw-r--r--  llvm/lib/LTO/LTO.cpp | 3
-rw-r--r--  llvm/lib/Object/OffloadBundle.cpp | 2
-rw-r--r--  llvm/lib/ObjectYAML/DXContainerYAML.cpp | 2
-rw-r--r--  llvm/lib/Passes/PassBuilderPipelines.cpp | 7
-rw-r--r--  llvm/lib/ProfileData/MemProfCommon.cpp | 4
-rw-r--r--  llvm/lib/Support/APFloat.cpp | 2
-rw-r--r--  llvm/lib/Support/Mustache.cpp | 82
-rw-r--r--  llvm/lib/Support/Path.cpp | 100
-rw-r--r--  llvm/lib/Support/ScopedPrinter.cpp | 17
-rw-r--r--  llvm/lib/Support/StringMap.cpp | 4
-rw-r--r--  llvm/lib/Support/VirtualFileSystem.cpp | 4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp | 3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp | 492
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.h | 30
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 16
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 62
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrFormats.td | 22
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 15
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp | 20
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h | 83
-rw-r--r--  llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp | 444
-rw-r--r--  llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp | 15
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td | 62
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp | 5
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 38
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 63
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSchedStrategy.h | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | 1
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 300
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 53
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.h | 23
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.td | 16
-rw-r--r--  llvm/lib/Target/AMDGPU/SOPInstructions.td | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP1Instructions.td | 11
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.h | 10
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.cpp | 6
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 1
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 5
-rw-r--r--  llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp | 85
-rw-r--r--  llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 1
-rw-r--r--  llvm/lib/Target/RISCV/RISCVGISel.td | 13
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 6
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 10
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoA.td | 16
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 70
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td | 2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td | 4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td | 4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td | 2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td | 41
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td | 20
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td | 8
-rw-r--r--  llvm/lib/Target/RISCV/RISCVProcessors.td | 81
-rw-r--r--  llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp | 15
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp | 43
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp | 1
-rw-r--r--  llvm/lib/Target/TargetMachine.cpp | 2
-rw-r--r--  llvm/lib/Target/VE/VEISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86FixupSetCC.cpp | 6
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 67
-rw-r--r--  llvm/lib/Target/X86/X86LowerAMXType.cpp | 30
-rw-r--r--  llvm/lib/Transforms/IPO/FunctionImport.cpp | 4
-rw-r--r--  llvm/lib/Transforms/IPO/FunctionSpecialization.cpp | 11
-rw-r--r--  llvm/lib/Transforms/IPO/GlobalOpt.cpp | 16
-rw-r--r--  llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp | 5
-rw-r--r--  llvm/lib/Transforms/IPO/SampleProfile.cpp | 11
-rw-r--r--  llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp | 4
-rw-r--r--  llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp | 4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 7
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 48
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineInternal.h | 1
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp | 29
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 29
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 44
-rw-r--r--  llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 48
-rw-r--r--  llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp | 10
-rw-r--r--  llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp | 26
-rw-r--r--  llvm/lib/Transforms/Scalar/LICM.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 7
-rw-r--r--  llvm/lib/Transforms/Utils/FunctionImportUtils.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Utils/Local.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Utils/LoopPeel.cpp | 121
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 140
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp | 3
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 133
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 44
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.cpp | 49
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.h | 11
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanHelpers.h | 20
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 147
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 12
-rw-r--r--  llvm/runtimes/CMakeLists.txt | 8
-rw-r--r--  llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json | 28
-rw-r--r--  llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json | 28
-rw-r--r--  llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json | 29
-rw-r--r--  llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt | 26
-rw-r--r--  llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt | 26
-rw-r--r--  llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt | 26
-rw-r--r--  llvm/test/Analysis/IR2Vec/if-else.ll | 2
-rw-r--r--  llvm/test/Analysis/IR2Vec/unreachable.ll | 2
-rw-r--r--  llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll | 41
-rw-r--r--  llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll | 126
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll | 40
-rw-r--r--  llvm/test/CMakeLists.txt | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/legalize-modf.mir | 206
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir | 136
-rw-r--r--  llvm/test/CodeGen/AArch64/cbz_wzr.mir | 260
-rw-r--r--  llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/framelayout-split-sve.mir | 587
-rw-r--r--  llvm/test/CodeGen/AArch64/framelayout-sve.mir | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/freeze.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/llvm.modf.ll | 459
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll | 86
-rw-r--r--  llvm/test/CodeGen/AArch64/pr161420.ll | 54
-rw-r--r--  llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll | 21
-rw-r--r--  llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll | 23
-rw-r--r--  llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir | 16
-rw-r--r--  llvm/test/CodeGen/AArch64/spillfill-sve.mir | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll | 824
-rw-r--r--  llvm/test/CodeGen/AArch64/stack-hazard.ll | 876
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-load-store-legalisation.ll | 2854
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/tbz-tbnz.ll | 324
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll | 104
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll | 104
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll | 518
-rw-r--r--  llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll | 1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll | 1641
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll | 131
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll | 96
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll | 94
-rw-r--r--  llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll | 1689
-rw-r--r--  llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll | 1580
-rw-r--r--  llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll | 1580
-rw-r--r--  llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll | 326
-rw-r--r--  llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir | 48
-rw-r--r--  llvm/test/CodeGen/AMDGPU/div_v2i128.ll | 126
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fptoi.i128.ll | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll | 76
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll | 47
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll | 47
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll | 76
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll | 721
-rw-r--r--  llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll | 331
-rw-r--r--  llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll | 14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/limit-coalesce.mir | 33
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llc-pipeline.ll | 60
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll | 92
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll | 89
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll | 22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memmove-var-size.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll | 75
-rw-r--r--  llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir | 148
-rw-r--r--  llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll | 14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll | 7
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll | 94
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll | 27
-rw-r--r--  llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll | 29
-rw-r--r--  llvm/test/CodeGen/ARM/and-mask-variable.ll | 90
-rw-r--r--  llvm/test/CodeGen/ARM/extract-bits.ll | 4591
-rw-r--r--  llvm/test/CodeGen/ARM/extract-lowbits.ll | 2752
-rw-r--r--  llvm/test/CodeGen/ARM/inline-asm-clobber.ll | 7
-rw-r--r--  llvm/test/CodeGen/ARM/llrint-conv.ll | 69
-rw-r--r--  llvm/test/CodeGen/ARM/llvm.exp10.ll | 16
-rw-r--r--  llvm/test/CodeGen/ARM/llvm.frexp.ll | 36
-rw-r--r--  llvm/test/CodeGen/ARM/lrint-conv.ll | 37
-rw-r--r--  llvm/test/CodeGen/ARM/vector-lrint.ll | 20
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll | 19
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll | 42
-rw-r--r--  llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll | 4
-rw-r--r--  llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll | 23
-rw-r--r--  llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll | 1
-rw-r--r--  llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll | 145
-rw-r--r--  llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll | 20
-rw-r--r--  llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll | 16
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll | 40
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir | 7
-rw-r--r--  llvm/test/CodeGen/RISCV/cmov-branch-opt.ll | 109
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/expandload.ll | 10
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll | 114
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll | 72
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll | 80
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/remat.ll | 132
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll | 36
-rw-r--r--  llvm/test/CodeGen/RISCV/select-bare.ll | 2
-rw-r--r--  llvm/test/CodeGen/RISCV/select-cc.ll | 59
-rw-r--r--  llvm/test/CodeGen/RISCV/select-cond.ll | 2
-rw-r--r--  llvm/test/CodeGen/RISCV/select-const.ll | 137
-rw-r--r--  llvm/test/CodeGen/RISCV/select.ll | 322
-rw-r--r--  llvm/test/CodeGen/RISCV/xqcicli.ll | 2
-rw-r--r--  llvm/test/CodeGen/RISCV/xqcicm.ll | 2
-rw-r--r--  llvm/test/CodeGen/RISCV/xqcics.ll | 126
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll | 53
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll | 19
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-cmp-04.ll | 4
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll | 41
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vld3.ll | 414
-rw-r--r--  llvm/test/CodeGen/VE/Vector/vec_divrem.ll | 56
-rw-r--r--  llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll | 78
-rw-r--r--  llvm/test/CodeGen/X86/combine-pack.ll | 49
-rw-r--r--  llvm/test/CodeGen/X86/fshl.ll | 81
-rw-r--r--  llvm/test/CodeGen/X86/fshr.ll | 90
-rw-r--r--  llvm/test/CodeGen/X86/pr161693.ll | 40
-rw-r--r--  llvm/test/CodeGen/X86/sbb.ll | 29
-rw-r--r--  llvm/test/CodeGen/X86/shift-i128.ll | 3
-rw-r--r--  llvm/test/DebugInfo/AArch64/asan-stack-vars.mir | 3
-rw-r--r--  llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir | 3
-rw-r--r--  llvm/test/DebugInfo/X86/dynamic-bitfield.ll | 13
-rw-r--r--  llvm/test/DebugInfo/X86/x86fixupsetcc-debug-instr-num.mir | 54
-rw-r--r--  llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s | 2
-rw-r--r--  llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s | 7
-rw-r--r--  llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s | 7
-rw-r--r--  llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s | 32
-rw-r--r--  llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s | 2
-rw-r--r--  llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s | 2
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll | 72
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll | 12
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll | 12
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll | 346
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll | 358
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll | 42
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll | 160
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll | 15
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll | 15
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll | 238
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll | 30
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll | 6
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll | 135
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll | 24
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll | 386
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll | 296
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll | 434
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll | 26
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll | 358
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll | 416
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll | 83
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll | 91
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll | 90
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll | 416
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll | 414
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll | 178
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll | 178
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll | 260
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll | 440
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll | 440
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll | 416
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll | 108
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll | 120
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll | 188
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll | 236
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll | 72
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll | 2228
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll | 1458
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll | 794
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll | 370
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll | 235
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll | 221
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll | 264
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll | 1430
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll | 96
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll | 96
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll | 48
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll | 48
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll | 32
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll | 48
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll | 72
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll | 11
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll | 148
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll | 56
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll | 78
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll | 36
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll | 6
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll | 18
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll | 236
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll | 76
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/array_types.ll | 12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/bmi.ll16
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll2
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/byval.ll24
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll144
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll108
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll236
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll148
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll12
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll36
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll78
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll36
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll396
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll60
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll118
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll48
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll482
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll610
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll14
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll752
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll4
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/or.ll6
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/overflow.ll14
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/pr32842.ll2
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/saturating.ll14
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/scmp.ll44
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/ucmp.ll38
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll16
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll16
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll8
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/vscale.ll6
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s588
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s576
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s152
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s84
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s72
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s2008
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s564
-rw-r--r--llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s164
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt9
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt45
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt12
-rw-r--r--llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td132
-rw-r--r--llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td2
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/pr161367.ll31
-rw-r--r--llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll63
-rw-r--r--llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll84
-rw-r--r--llvm/test/Transforms/FunctionAttrs/nocapture.ll68
-rw-r--r--llvm/test/Transforms/GVN/condprop.ll60
-rw-r--r--llvm/test/Transforms/GlobalOpt/fastcc.ll90
-rw-r--r--llvm/test/Transforms/InstCombine/fcmp.ll40
-rw-r--r--llvm/test/Transforms/InstCombine/freeze-phi.ll28
-rw-r--r--llvm/test/Transforms/InstCombine/freeze.ll21
-rw-r--r--llvm/test/Transforms/InstCombine/funnel.ll26
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-clamp.ll295
-rw-r--r--llvm/test/Transforms/InstCombine/in-freeze-phi.ll274
-rw-r--r--llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll50
-rw-r--r--llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll24
-rw-r--r--llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll75
-rw-r--r--llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll64
-rw-r--r--llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll11
-rw-r--r--llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll13
-rw-r--r--llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll26
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll30
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll30
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll13
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll46
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll51
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll203
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll34
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll93
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll80
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll33
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll51
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll39
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll135
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll45
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll88
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll112
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll50
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll7
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll86
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll34
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll35
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll32
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll166
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll152
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll5
-rw-r--r--llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll30
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll166
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll321
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/f16.ll13
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll34
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll60
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll885
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll33
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll56
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll22
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll267
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll60
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll28
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll48
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll67
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll68
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll98
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll59
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll180
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll56
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll72
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll248
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll113
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll33
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll248
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll50
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll84
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll60
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll182
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll21
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll19
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll89
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/cost-model.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll48
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll23
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll52
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/interleaving.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll274
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/optsize.ll88
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll17
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr34438.ll19
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr81872.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll17
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll460
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/small-size.ll48
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll148
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll50
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll28
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/bsd_regex.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/check-prof-info.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll13
-rw-r--r--llvm/test/Transforms/LoopVectorize/constantfolder.ll105
-rw-r--r--llvm/test/Transforms/LoopVectorize/create-induction-resume.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/dead_instructions.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll19
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll244
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll189
-rw-r--r--llvm/test/Transforms/LoopVectorize/flags.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/float-induction.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll46
-rw-r--r--llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll46
-rw-r--r--llvm/test/Transforms/LoopVectorize/if-pred-stores.ll59
-rw-r--r--llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/induction-step.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/induction.ll178
-rw-r--r--llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll71
-rw-r--r--llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll13
-rw-r--r--llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll17
-rw-r--r--llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll45
-rw-r--r--llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll38
-rw-r--r--llvm/test/Transforms/LoopVectorize/is_fpclass.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll180
-rw-r--r--llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll144
-rw-r--r--llvm/test/Transforms/LoopVectorize/iv_outside_user.ll156
-rw-r--r--llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll138
-rw-r--r--llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll23
-rw-r--r--llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll45
-rw-r--r--llvm/test/Transforms/LoopVectorize/loop-form.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll22
-rw-r--r--llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/metadata.ll126
-rw-r--r--llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll56
-rw-r--r--llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll63
-rw-r--r--llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/optsize.ll115
-rw-r--r--llvm/test/Transforms/LoopVectorize/phi-cost.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr32859.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr44488-predication.ll21
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll57
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll17
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll26
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr66616.ll29
-rw-r--r--llvm/test/Transforms/LoopVectorize/predicate-switch.ll58
-rw-r--r--llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll68
-rw-r--r--llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll560
-rw-r--r--llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll76
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-inloop.ll949
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-order.ll116
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-predselect.ll40
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll48
-rw-r--r--llvm/test/Transforms/LoopVectorize/reverse_induction.ll58
-rw-r--r--llvm/test/Transforms/LoopVectorize/runtime-check.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll49
-rw-r--r--llvm/test/Transforms/LoopVectorize/select-neg-cond.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll45
-rw-r--r--llvm/test/Transforms/LoopVectorize/select-reduction.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll28
-rw-r--r--llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll89
-rw-r--r--llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll32
-rw-r--r--llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll119
-rw-r--r--llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll105
-rw-r--r--llvm/test/Transforms/LoopVectorize/single_early_exit.ll39
-rw-r--r--llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll424
-rw-r--r--llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll11
-rw-r--r--llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll76
-rw-r--r--llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll81
-rw-r--r--llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll17
-rw-r--r--llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll19
-rw-r--r--llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/trunc-reductions.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/trunc-shifts.ll84
-rw-r--r--llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform-blend.ll64
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll57
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll35
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll70
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll104
-rw-r--r--llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll69
-rw-r--r--llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll84
-rw-r--r--llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll50
-rw-r--r--llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll68
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll47
-rw-r--r--llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll35
-rw-r--r--llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll11
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll6
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll118
-rw-r--r--llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll171
-rw-r--r--llvm/test/Unit/CMakeLists.txt5
-rw-r--r--llvm/test/Verifier/captures-metadata.ll37
-rw-r--r--llvm/test/tools/llvm-ir2vec/entities.ll28
-rw-r--r--llvm/tools/llvm-cgdata/llvm-cgdata.cpp2
-rw-r--r--llvm/tools/llvm-config/llvm-config.cpp6
-rw-r--r--llvm/tools/llvm-dwp/llvm-dwp.cpp2
-rw-r--r--llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp6
-rw-r--r--llvm/tools/llvm-jitlink/llvm-jitlink.cpp6
-rw-r--r--llvm/tools/llvm-opt-report/OptReport.cpp2
-rw-r--r--llvm/unittests/ADT/APFloatTest.cpp7
-rw-r--r--llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp13
-rw-r--r--llvm/unittests/Analysis/IR2VecTest.cpp388
-rw-r--r--llvm/unittests/Analysis/MemoryProfileInfoTest.cpp23
-rw-r--r--llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp5
-rw-r--r--llvm/unittests/CodeGen/RegAllocScoreTest.cpp3
-rw-r--r--llvm/unittests/Frontend/CMakeLists.txt1
-rw-r--r--llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp7
-rw-r--r--llvm/unittests/Object/ELFTest.cpp72
-rw-r--r--llvm/unittests/ProfileData/MemProfTest.cpp11
-rw-r--r--llvm/unittests/Support/Path.cpp4
-rw-r--r--llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni7
-rw-r--r--llvm/utils/gn/secondary/libcxx/include/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn1
-rw-r--r--llvm/utils/profcheck-xfail.txt2
-rw-r--r--mlir/.clang-format1
-rw-r--r--mlir/include/mlir-c/IR.h4
-rw-r--r--mlir/include/mlir/Bindings/Python/NanobindAdaptors.h38
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td22
-rw-r--r--mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td49
-rw-r--r--mlir/include/mlir/Dialect/Math/Transforms/Passes.td8
-rw-r--r--mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td39
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td29
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td69
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td63
-rw-r--r--mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h1
-rw-r--r--mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td54
-rw-r--r--mlir/lib/Bindings/Python/IRCore.cpp70
-rw-r--r--mlir/lib/Bindings/Python/IRModule.h6
-rw-r--r--mlir/lib/Bindings/Python/IRTypes.cpp4
-rw-r--r--mlir/lib/Bindings/Python/MainModule.cpp6
-rw-r--r--mlir/lib/Bindings/Python/Rewrite.cpp2
-rw-r--r--mlir/lib/CAPI/IR/IR.cpp4
-rw-r--r--mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp52
-rw-r--r--mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp2
-rw-r--r--mlir/lib/Dialect/Arith/IR/ArithOps.cpp7
-rw-r--r--mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp3
-rw-r--r--mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp116
-rw-r--r--mlir/lib/Dialect/Math/Transforms/CMakeLists.txt1
-rw-r--r--mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp80
-rw-r--r--mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp23
-rw-r--r--mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp451
-rw-r--r--mlir/lib/Dialect/Transform/IR/TransformOps.cpp7
-rw-r--r--mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp184
-rw-r--r--mlir/lib/Dialect/Vector/IR/VectorOps.cpp82
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp6
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp6
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp10
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp4
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp6
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp12
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp4
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp16
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp8
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp24
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp40
-rw-r--r--mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp24
-rw-r--r--mlir/lib/IR/Builders.cpp7
-rw-r--r--mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp2
-rw-r--r--mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp162
-rw-r--r--mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt53
-rw-r--r--mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt14
-rw-r--r--mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp42
-rw-r--r--mlir/python/mlir/dialects/transform/structured.py6
-rw-r--r--mlir/python/mlir/dialects/transform/tune.py66
-rw-r--r--mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir30
-rw-r--r--mlir/test/Dialect/Arith/canonicalize.mlir12
-rw-r--r--mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir11
-rw-r--r--mlir/test/Dialect/LLVMIR/rocdl.mlir32
-rw-r--r--mlir/test/Dialect/Math/sincos-fusion.mlir86
-rw-r--r--mlir/test/Dialect/MemRef/invalid.mlir16
-rw-r--r--mlir/test/Dialect/MemRef/ops.mlir9
-rw-r--r--mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir198
-rw-r--r--mlir/test/Dialect/OpenMP/cli-tile.mlir138
-rw-r--r--mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir28
-rw-r--r--mlir/test/Dialect/OpenMP/invalid-tile.mlir119
-rw-r--r--mlir/test/Dialect/Transform/test-promote-tensors.mlir104
-rw-r--r--mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir85
-rw-r--r--mlir/test/Dialect/Transform/test-tune-extension.mlir126
-rw-r--r--mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir19
-rw-r--r--mlir/test/Target/LLVMIR/openmp-cli-tile01.mlir101
-rw-r--r--mlir/test/Target/LLVMIR/openmp-cli-tile02.mlir190
-rw-r--r--mlir/test/Target/LLVMIR/rocdl.mlir28
-rw-r--r--mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt2
-rw-r--r--mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp31
-rw-r--r--mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir18
-rw-r--r--mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir51
-rw-r--r--mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir27
-rw-r--r--mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp2
-rw-r--r--mlir/test/mlir-tblgen/op-format-invalid.td2
-rw-r--r--mlir/test/mlir-tblgen/op-format-spec.td2
-rw-r--r--mlir/test/python/dialects/transform_tune_ext.py105
-rw-r--r--mlir/test/python/ir/operation.py8
-rw-r--r--mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp1
-rw-r--r--mlir/tools/mlir-tblgen/FormatGen.cpp2
-rw-r--r--mlir/tools/mlir-tblgen/OpFormatGen.cpp1
-rw-r--r--offload/libomptarget/OpenMP/InteropAPI.cpp41
-rw-r--r--offload/libomptarget/exports5
-rw-r--r--offload/plugins-nextgen/amdgpu/src/rtl.cpp31
-rw-r--r--offload/plugins-nextgen/cuda/src/rtl.cpp44
-rw-r--r--offload/test/offloading/fortran/target-declare-mapper-parent-allocatable.f9043
-rw-r--r--orc-rt/include/orc-rt/CallableTraitsHelper.h74
-rw-r--r--orc-rt/include/orc-rt/Error.h24
-rw-r--r--orc-rt/include/orc-rt/SPSWrapperFunction.h78
-rw-r--r--orc-rt/include/orc-rt/WrapperFunction.h95
-rw-r--r--orc-rt/unittests/CMakeLists.txt1
-rw-r--r--orc-rt/unittests/CallableTraitsHelperTest.cpp69
-rw-r--r--orc-rt/unittests/ErrorTest.cpp48
-rw-r--r--orc-rt/unittests/SPSWrapperFunctionTest.cpp74
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel19
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel1
1483 files changed, 61766 insertions, 36827 deletions
diff --git a/.clang-format b/.clang-format
index 9b3aa8b..ecb44bf 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1 +1,2 @@
BasedOnStyle: LLVM
+LineEnding: LF
diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml
index 5fe2ffb..1c07a0a 100644
--- a/.github/workflows/libcxx-build-and-test.yaml
+++ b/.github/workflows/libcxx-build-and-test.yaml
@@ -215,7 +215,7 @@ jobs:
- uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
with:
# https://github.com/actions/runner-images/blob/main/images/macos/macos-15-Readme.md
- xcode-version: '16.3'
+ xcode-version: '26.0'
- uses: seanmiddleditch/gha-setup-ninja@3b1f8f94a2f8254bd26914c4ab9474d4f0015f67 # v6
- name: Build and test
run: |
diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h
index 082f1ce..8960b19 100644
--- a/bolt/include/bolt/Core/BinaryContext.h
+++ b/bolt/include/bolt/Core/BinaryContext.h
@@ -190,6 +190,9 @@ class BinaryContext {
/// Unique build ID if available for the binary.
std::optional<std::string> FileBuildID;
+ /// Whether the GNU property note indicates AArch64 BTI.
+ bool UsesBTI{false};
+
/// Set of all sections.
struct CompareSections {
bool operator()(const BinarySection *A, const BinarySection *B) const {
@@ -384,6 +387,9 @@ public:
}
void setFileBuildID(StringRef ID) { FileBuildID = std::string(ID); }
+ bool usesBTI() const { return UsesBTI; }
+ void setUsesBTI(bool Value) { UsesBTI = Value; }
+
bool hasSymbolsWithFileName() const { return HasSymbolsWithFileName; }
void setHasSymbolsWithFileName(bool Value) { HasSymbolsWithFileName = Value; }
diff --git a/bolt/include/bolt/Rewrite/MetadataRewriters.h b/bolt/include/bolt/Rewrite/MetadataRewriters.h
index b71bd6c..2c09c879 100644
--- a/bolt/include/bolt/Rewrite/MetadataRewriters.h
+++ b/bolt/include/bolt/Rewrite/MetadataRewriters.h
@@ -27,6 +27,8 @@ std::unique_ptr<MetadataRewriter> createPseudoProbeRewriter(BinaryContext &);
std::unique_ptr<MetadataRewriter> createSDTRewriter(BinaryContext &);
+std::unique_ptr<MetadataRewriter> createGNUPropertyRewriter(BinaryContext &);
+
} // namespace bolt
} // namespace llvm
diff --git a/bolt/include/bolt/Rewrite/RewriteInstance.h b/bolt/include/bolt/Rewrite/RewriteInstance.h
index 19dcce8..0fe2e32 100644
--- a/bolt/include/bolt/Rewrite/RewriteInstance.h
+++ b/bolt/include/bolt/Rewrite/RewriteInstance.h
@@ -249,12 +249,11 @@ private:
/// Analyze relocation \p Rel.
/// Return true if the relocation was successfully processed, false otherwise.
/// The \p SymbolName, \p SymbolAddress, \p Addend and \p ExtractedValue
- /// parameters will be set on success. The \p Skip argument indicates
- /// that the relocation was analyzed, but it must not be processed.
+ /// parameters will be set on success.
bool analyzeRelocation(const object::RelocationRef &Rel, uint32_t &RType,
std::string &SymbolName, bool &IsSectionRelocation,
uint64_t &SymbolAddress, int64_t &Addend,
- uint64_t &ExtractedValue, bool &Skip) const;
+ uint64_t &ExtractedValue) const;
/// Rewrite non-allocatable sections with modifications.
void rewriteNoteSections();
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index 98440cd..b7ded6b 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -1662,7 +1662,7 @@ void BinaryContext::preprocessDWODebugInfo() {
"files.\n";
}
// Prevent failures when DWOName is already an absolute path.
- sys::fs::make_absolute(DWOCompDir, AbsolutePath);
+ sys::path::make_absolute(DWOCompDir, AbsolutePath);
DWARFUnit *DWOCU =
DwarfUnit->getNonSkeletonUnitDIE(false, AbsolutePath).getDwarfUnit();
if (!DWOCU->isDWOUnit()) {
diff --git a/bolt/lib/Core/Relocation.cpp b/bolt/lib/Core/Relocation.cpp
index f882627..4b827b6 100644
--- a/bolt/lib/Core/Relocation.cpp
+++ b/bolt/lib/Core/Relocation.cpp
@@ -81,7 +81,6 @@ static bool isSupportedAArch64(uint32_t Type) {
case ELF::R_AARCH64_LD64_GOT_LO12_NC:
case ELF::R_AARCH64_TLSDESC_LD64_LO12:
case ELF::R_AARCH64_TLSDESC_ADD_LO12:
- case ELF::R_AARCH64_TLSDESC_CALL:
case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
case ELF::R_AARCH64_PREL16:
case ELF::R_AARCH64_PREL32:
@@ -193,7 +192,6 @@ static size_t getSizeForTypeAArch64(uint32_t Type) {
case ELF::R_AARCH64_LD64_GOT_LO12_NC:
case ELF::R_AARCH64_TLSDESC_LD64_LO12:
case ELF::R_AARCH64_TLSDESC_ADD_LO12:
- case ELF::R_AARCH64_TLSDESC_CALL:
case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
case ELF::R_AARCH64_PREL32:
case ELF::R_AARCH64_MOVW_UABS_G0:
@@ -248,7 +246,14 @@ static bool skipRelocationTypeX86(uint32_t Type) {
}
static bool skipRelocationTypeAArch64(uint32_t Type) {
- return Type == ELF::R_AARCH64_NONE || Type == ELF::R_AARCH64_LD_PREL_LO19;
+ switch (Type) {
+ default:
+ return false;
+ case ELF::R_AARCH64_NONE:
+ case ELF::R_AARCH64_LD_PREL_LO19:
+ case ELF::R_AARCH64_TLSDESC_CALL:
+ return true;
+ }
}
static bool skipRelocationTypeRISCV(uint32_t Type) {
@@ -362,7 +367,6 @@ static uint64_t extractValueAArch64(uint32_t Type, uint64_t Contents,
return static_cast<int64_t>(PC) + SignExtend64<32>(Contents & 0xffffffff);
case ELF::R_AARCH64_PREL64:
return static_cast<int64_t>(PC) + Contents;
- case ELF::R_AARCH64_TLSDESC_CALL:
case ELF::R_AARCH64_JUMP26:
case ELF::R_AARCH64_CALL26:
// Immediate goes in bits 25:0 of B and BL.
@@ -552,7 +556,6 @@ static bool isGOTAArch64(uint32_t Type) {
case ELF::R_AARCH64_TLSDESC_ADR_PAGE21:
case ELF::R_AARCH64_TLSDESC_LD64_LO12:
case ELF::R_AARCH64_TLSDESC_ADD_LO12:
- case ELF::R_AARCH64_TLSDESC_CALL:
return true;
}
}
@@ -591,7 +594,6 @@ static bool isTLSAArch64(uint32_t Type) {
case ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
case ELF::R_AARCH64_TLSDESC_LD64_LO12:
case ELF::R_AARCH64_TLSDESC_ADD_LO12:
- case ELF::R_AARCH64_TLSDESC_CALL:
case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
return true;
}
@@ -667,7 +669,6 @@ static bool isPCRelativeAArch64(uint32_t Type) {
case ELF::R_AARCH64_MOVW_UABS_G2_NC:
case ELF::R_AARCH64_MOVW_UABS_G3:
return false;
- case ELF::R_AARCH64_TLSDESC_CALL:
case ELF::R_AARCH64_CALL26:
case ELF::R_AARCH64_JUMP26:
case ELF::R_AARCH64_TSTBR14:
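(Context for the R_AARCH64_TLSDESC_CALL removals above: it is a marker relocation that only annotates the indirect call of a TLS-descriptor sequence for the linker and patches no instruction bits, so this change moves it into skipRelocationTypeAArch64() instead of modeling it everywhere. A sketch, with `var` as an illustrative symbol:

    // adrp x0, :tlsdesc:var              // R_AARCH64_TLSDESC_ADR_PAGE21
    // ldr  x1, [x0, #:tlsdesc_lo12:var]  // R_AARCH64_TLSDESC_LD64_LO12
    // add  x0, x0, #:tlsdesc_lo12:var    // R_AARCH64_TLSDESC_ADD_LO12
    // .tlsdesccall var                   // R_AARCH64_TLSDESC_CALL, marker only
    // blr  x1

Since the marker patches no bits, the type can also be dropped from the size, extraction, GOT, TLS, and PC-relative classification switches above.)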
diff --git a/bolt/lib/Passes/PAuthGadgetScanner.cpp b/bolt/lib/Passes/PAuthGadgetScanner.cpp
index cfe4b6b..01b350b 100644
--- a/bolt/lib/Passes/PAuthGadgetScanner.cpp
+++ b/bolt/lib/Passes/PAuthGadgetScanner.cpp
@@ -14,6 +14,7 @@
#include "bolt/Passes/PAuthGadgetScanner.h"
#include "bolt/Core/ParallelUtilities.h"
#include "bolt/Passes/DataflowAnalysis.h"
+#include "bolt/Utils/CommandLineOpts.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/MC/MCInst.h"
@@ -26,6 +27,11 @@ namespace llvm {
namespace bolt {
namespace PAuthGadgetScanner {
+static cl::opt<bool> AuthTrapsOnFailure(
+ "auth-traps-on-failure",
+ cl::desc("Assume authentication instructions always trap on failure"),
+ cl::cat(opts::BinaryAnalysisCategory));
+
[[maybe_unused]] static void traceInst(const BinaryContext &BC, StringRef Label,
const MCInst &MI) {
dbgs() << " " << Label << ": ";
@@ -82,8 +88,8 @@ public:
TrackedRegisters(ArrayRef<MCPhysReg> RegsToTrack)
: Registers(RegsToTrack),
RegToIndexMapping(getMappingSize(RegsToTrack), NoIndex) {
- for (unsigned I = 0; I < RegsToTrack.size(); ++I)
- RegToIndexMapping[RegsToTrack[I]] = I;
+ for (auto [MappedIndex, Reg] : llvm::enumerate(RegsToTrack))
+ RegToIndexMapping[Reg] = MappedIndex;
}
ArrayRef<MCPhysReg> getRegisters() const { return Registers; }
@@ -197,9 +203,9 @@ struct SrcState {
SafeToDerefRegs &= StateIn.SafeToDerefRegs;
TrustedRegs &= StateIn.TrustedRegs;
- for (unsigned I = 0; I < LastInstWritingReg.size(); ++I)
- for (const MCInst *J : StateIn.LastInstWritingReg[I])
- LastInstWritingReg[I].insert(J);
+ for (auto [ThisSet, OtherSet] :
+ llvm::zip_equal(LastInstWritingReg, StateIn.LastInstWritingReg))
+ ThisSet.insert_range(OtherSet);
return *this;
}
@@ -218,11 +224,9 @@ struct SrcState {
static void printInstsShort(raw_ostream &OS,
ArrayRef<SetOfRelatedInsts> Insts) {
OS << "Insts: ";
- for (unsigned I = 0; I < Insts.size(); ++I) {
- auto &Set = Insts[I];
+ for (auto [I, PtrSet] : llvm::enumerate(Insts)) {
OS << "[" << I << "](";
- for (const MCInst *MCInstP : Set)
- OS << MCInstP << " ";
+ interleave(PtrSet, OS, " ");
OS << ")";
}
}
@@ -364,6 +368,34 @@ protected:
return Clobbered;
}
+ std::optional<MCPhysReg> getRegMadeTrustedByChecking(const MCInst &Inst,
+ SrcState Cur) const {
+ // This function can return at most one register, which is sufficient:
+ // no instruction or checker sequence ever checks more than one register
+ // at a time on AArch64.
+ std::optional<MCPhysReg> RegCheckedByInst =
+ BC.MIB->getAuthCheckedReg(Inst, /*MayOverwrite=*/false);
+ if (RegCheckedByInst && Cur.SafeToDerefRegs[*RegCheckedByInst])
+ return *RegCheckedByInst;
+
+ auto It = CheckerSequenceInfo.find(&Inst);
+ if (It == CheckerSequenceInfo.end())
+ return std::nullopt;
+
+ MCPhysReg RegCheckedBySequence = It->second.first;
+ const MCInst *FirstCheckerInst = It->second.second;
+
+ // FirstCheckerInst should belong to the same basic block (see the
+ // assertion in DataflowSrcSafetyAnalysis::run()), meaning it was
+ // deterministically processed a few steps before this instruction.
+ const SrcState &StateBeforeChecker = getStateBefore(*FirstCheckerInst);
+
+ // The sequence checks the register, but the register must have been
+ // authenticated before the sequence runs.
+ if (!StateBeforeChecker.SafeToDerefRegs[RegCheckedBySequence])
+ return std::nullopt;
+
+ return RegCheckedBySequence;
+ }
+
// Returns all registers that can be treated as if they are written by an
// authentication instruction.
SmallVector<MCPhysReg> getRegsMadeSafeToDeref(const MCInst &Point,
@@ -382,22 +414,43 @@ protected:
// ... an address can be updated in a safe manner, producing the result
// which is as trusted as the input address.
if (auto DstAndSrc = BC.MIB->analyzeAddressArithmeticsForPtrAuth(Point)) {
- if (Cur.SafeToDerefRegs[DstAndSrc->second])
- Regs.push_back(DstAndSrc->first);
+ auto [DstReg, SrcReg] = *DstAndSrc;
+ if (Cur.SafeToDerefRegs[SrcReg])
+ Regs.push_back(DstReg);
}
+ // Make sure an explicit checker sequence keeps the register
+ // safe-to-dereference when the register would otherwise be clobbered
+ // according to the regular rules:
+ //
+ // ; LR is safe to dereference here
+ // mov x16, x30 ; start of the sequence, LR is s-t-d right before
+ // xpaclri ; clobbers LR, LR is not safe anymore
+ // cmp x30, x16
+ // b.eq 1f ; end of the sequence: LR is marked as trusted
+ // brk 0x1234
+ // 1:
+ // ; at this point LR would be marked as trusted,
+ // ; but not safe-to-dereference
+ //
+ // or even just
+ //
+ // ; X1 is safe to dereference here
+ // ldr x0, [x1, #8]!
+ // ; X1 is trusted here, but it was clobbered due to address write-back
+ if (auto CheckedReg = getRegMadeTrustedByChecking(Point, Cur))
+ Regs.push_back(*CheckedReg);
+
return Regs;
}
// Returns all registers made trusted by this instruction.
SmallVector<MCPhysReg> getRegsMadeTrusted(const MCInst &Point,
const SrcState &Cur) const {
+ assert(!AuthTrapsOnFailure && "Use getRegsMadeSafeToDeref instead");
SmallVector<MCPhysReg> Regs;
// An authenticated pointer can be checked, or
- std::optional<MCPhysReg> CheckedReg =
- BC.MIB->getAuthCheckedReg(Point, /*MayOverwrite=*/false);
- if (CheckedReg && Cur.SafeToDerefRegs[*CheckedReg])
+ if (auto CheckedReg = getRegMadeTrustedByChecking(Point, Cur))
Regs.push_back(*CheckedReg);
// ... a pointer can be authenticated by an instruction that always checks
@@ -408,19 +461,6 @@ protected:
if (AutReg && IsChecked)
Regs.push_back(*AutReg);
- if (CheckerSequenceInfo.contains(&Point)) {
- MCPhysReg CheckedReg;
- const MCInst *FirstCheckerInst;
- std::tie(CheckedReg, FirstCheckerInst) = CheckerSequenceInfo.at(&Point);
-
- // FirstCheckerInst should belong to the same basic block (see the
- // assertion in DataflowSrcSafetyAnalysis::run()), meaning it was
- // deterministically processed a few steps before this instruction.
- const SrcState &StateBeforeChecker = getStateBefore(*FirstCheckerInst);
- if (StateBeforeChecker.SafeToDerefRegs[CheckedReg])
- Regs.push_back(CheckedReg);
- }
-
// ... a safe address can be materialized, or
if (auto NewAddrReg = BC.MIB->getMaterializedAddressRegForPtrAuth(Point))
Regs.push_back(*NewAddrReg);
@@ -428,8 +468,9 @@ protected:
// ... an address can be updated in a safe manner, producing the result
// which is as trusted as the input address.
if (auto DstAndSrc = BC.MIB->analyzeAddressArithmeticsForPtrAuth(Point)) {
- if (Cur.TrustedRegs[DstAndSrc->second])
- Regs.push_back(DstAndSrc->first);
+ auto [DstReg, SrcReg] = *DstAndSrc;
+ if (Cur.TrustedRegs[SrcReg])
+ Regs.push_back(DstReg);
}
return Regs;
@@ -463,28 +504,11 @@ protected:
BitVector Clobbered = getClobberedRegs(Point);
SmallVector<MCPhysReg> NewSafeToDerefRegs =
getRegsMadeSafeToDeref(Point, Cur);
- SmallVector<MCPhysReg> NewTrustedRegs = getRegsMadeTrusted(Point, Cur);
-
- // Ideally, being trusted is a strictly stronger property than being
- // safe-to-dereference. To simplify the computation of Next state, enforce
- // this for NewSafeToDerefRegs and NewTrustedRegs. Additionally, this
- // fixes the properly for "cumulative" register states in tricky cases
- // like the following:
- //
- // ; LR is safe to dereference here
- // mov x16, x30 ; start of the sequence, LR is s-t-d right before
- // xpaclri ; clobbers LR, LR is not safe anymore
- // cmp x30, x16
- // b.eq 1f ; end of the sequence: LR is marked as trusted
- // brk 0x1234
- // 1:
- // ; at this point LR would be marked as trusted,
- // ; but not safe-to-dereference
- //
- for (auto TrustedReg : NewTrustedRegs) {
- if (!is_contained(NewSafeToDerefRegs, TrustedReg))
- NewSafeToDerefRegs.push_back(TrustedReg);
- }
+ // If authentication instructions trap on failure, safe-to-dereference
+ // registers are always trusted.
+ SmallVector<MCPhysReg> NewTrustedRegs =
+ AuthTrapsOnFailure ? NewSafeToDerefRegs
+ : getRegsMadeTrusted(Point, Cur);
// Then, compute the state after this instruction is executed.
SrcState Next = Cur;
@@ -521,6 +545,11 @@ protected:
dbgs() << ")\n";
});
+ // Being trusted is a strictly stronger property than being
+ // safe-to-dereference.
+ assert(!Next.TrustedRegs.test(Next.SafeToDerefRegs) &&
+ "SafeToDerefRegs should contain all TrustedRegs");
+
return Next;
}
@@ -836,9 +865,9 @@ struct DstState {
return (*this = StateIn);
CannotEscapeUnchecked &= StateIn.CannotEscapeUnchecked;
- for (unsigned I = 0; I < FirstInstLeakingReg.size(); ++I)
- for (const MCInst *J : StateIn.FirstInstLeakingReg[I])
- FirstInstLeakingReg[I].insert(J);
+ for (auto [ThisSet, OtherSet] :
+ llvm::zip_equal(FirstInstLeakingReg, StateIn.FirstInstLeakingReg))
+ ThisSet.insert_range(OtherSet);
return *this;
}
@@ -1004,8 +1033,7 @@ protected:
// ... an address can be updated in a safe manner, or
if (auto DstAndSrc = BC.MIB->analyzeAddressArithmeticsForPtrAuth(Inst)) {
- MCPhysReg DstReg, SrcReg;
- std::tie(DstReg, SrcReg) = *DstAndSrc;
+ auto [DstReg, SrcReg] = *DstAndSrc;
// Note that *all* registers containing the derived values must be safe,
// both source and destination ones. No temporaries are supported for now.
if (Cur.CannotEscapeUnchecked[SrcReg] &&
@@ -1045,7 +1073,7 @@ protected:
// If this instruction terminates the program immediately, no
// authentication oracles are possible past this point.
if (BC.MIB->isTrap(Point)) {
- LLVM_DEBUG({ traceInst(BC, "Trap instruction found", Point); });
+ LLVM_DEBUG(traceInst(BC, "Trap instruction found", Point));
DstState Next(NumRegs, RegsToTrackInstsFor.getNumTrackedRegisters());
Next.CannotEscapeUnchecked.set();
return Next;
@@ -1130,6 +1158,11 @@ public:
}
void run() override {
+ // As long as DstSafetyAnalysis is only computed to detect authentication
+ // oracles, it is a waste of time to compute it when authentication
+ // instructions are known to always trap on failure.
+ assert(!AuthTrapsOnFailure &&
+ "DstSafetyAnalysis is useless with faulting auth");
for (BinaryBasicBlock &BB : Func) {
if (auto CheckerInfo = BC.MIB->getAuthCheckedReg(BB)) {
LLVM_DEBUG({
@@ -1215,7 +1248,7 @@ public:
// starting to analyze Inst.
if (BC.MIB->isCall(Inst) || BC.MIB->isBranch(Inst) ||
BC.MIB->isReturn(Inst)) {
- LLVM_DEBUG({ traceInst(BC, "Control flow instruction", Inst); });
+ LLVM_DEBUG(traceInst(BC, "Control flow instruction", Inst));
S = createUnsafeState();
}
@@ -1360,7 +1393,7 @@ shouldReportUnsafeTailCall(const BinaryContext &BC, const BinaryFunction &BF,
// such libc, ignore tail calls performed by ELF entry function.
if (BC.StartFunctionAddress &&
*BC.StartFunctionAddress == Inst.getFunction()->getAddress()) {
- LLVM_DEBUG({ dbgs() << " Skipping tail call in ELF entry function.\n"; });
+ LLVM_DEBUG(dbgs() << " Skipping tail call in ELF entry function.\n");
return std::nullopt;
}
@@ -1434,7 +1467,7 @@ shouldReportAuthOracle(const BinaryContext &BC, const MCInstReference &Inst,
});
if (S.empty()) {
- LLVM_DEBUG({ dbgs() << " DstState is empty!\n"; });
+ LLVM_DEBUG(dbgs() << " DstState is empty!\n");
return make_generic_report(
Inst, "Warning: no state computed for an authentication instruction "
"(possibly unreachable)");
@@ -1461,7 +1494,7 @@ collectRegsToTrack(ArrayRef<PartialReport<MCPhysReg>> Reports) {
void FunctionAnalysisContext::findUnsafeUses(
SmallVector<PartialReport<MCPhysReg>> &Reports) {
auto Analysis = SrcSafetyAnalysis::create(BF, AllocatorId, {});
- LLVM_DEBUG({ dbgs() << "Running src register safety analysis...\n"; });
+ LLVM_DEBUG(dbgs() << "Running src register safety analysis...\n");
Analysis->run();
LLVM_DEBUG({
dbgs() << "After src register safety analysis:\n";
@@ -1518,8 +1551,7 @@ void FunctionAnalysisContext::findUnsafeUses(
const SrcState &S = Analysis->getStateBefore(Inst);
if (S.empty()) {
- LLVM_DEBUG(
- { traceInst(BC, "Instruction has no state, skipping", Inst); });
+ LLVM_DEBUG(traceInst(BC, "Instruction has no state, skipping", Inst));
assert(UnreachableBBReported && "Should be reported at least once");
(void)UnreachableBBReported;
return;
@@ -1546,8 +1578,7 @@ void FunctionAnalysisContext::augmentUnsafeUseReports(
SmallVector<MCPhysReg> RegsToTrack = collectRegsToTrack(Reports);
// Re-compute the analysis with register tracking.
auto Analysis = SrcSafetyAnalysis::create(BF, AllocatorId, RegsToTrack);
- LLVM_DEBUG(
- { dbgs() << "\nRunning detailed src register safety analysis...\n"; });
+ LLVM_DEBUG(dbgs() << "\nRunning detailed src register safety analysis...\n");
Analysis->run();
LLVM_DEBUG({
dbgs() << "After detailed src register safety analysis:\n";
@@ -1557,7 +1588,7 @@ void FunctionAnalysisContext::augmentUnsafeUseReports(
// Augment gadget reports.
for (auto &Report : Reports) {
MCInstReference Location = Report.Issue->Location;
- LLVM_DEBUG({ traceInst(BC, "Attaching clobbering info to", Location); });
+ LLVM_DEBUG(traceInst(BC, "Attaching clobbering info to", Location));
assert(Report.RequestedDetails &&
"Should be removed by handleSimpleReports");
auto DetailedInfo =
@@ -1571,9 +1602,11 @@ void FunctionAnalysisContext::findUnsafeDefs(
SmallVector<PartialReport<MCPhysReg>> &Reports) {
if (PacRetGadgetsOnly)
return;
+ if (AuthTrapsOnFailure)
+ return;
auto Analysis = DstSafetyAnalysis::create(BF, AllocatorId, {});
- LLVM_DEBUG({ dbgs() << "Running dst register safety analysis...\n"; });
+ LLVM_DEBUG(dbgs() << "Running dst register safety analysis...\n");
Analysis->run();
LLVM_DEBUG({
dbgs() << "After dst register safety analysis:\n";
@@ -1596,8 +1629,7 @@ void FunctionAnalysisContext::augmentUnsafeDefReports(
SmallVector<MCPhysReg> RegsToTrack = collectRegsToTrack(Reports);
// Re-compute the analysis with register tracking.
auto Analysis = DstSafetyAnalysis::create(BF, AllocatorId, RegsToTrack);
- LLVM_DEBUG(
- { dbgs() << "\nRunning detailed dst register safety analysis...\n"; });
+ LLVM_DEBUG(dbgs() << "\nRunning detailed dst register safety analysis...\n");
Analysis->run();
LLVM_DEBUG({
dbgs() << "After detailed dst register safety analysis:\n";
@@ -1607,7 +1639,7 @@ void FunctionAnalysisContext::augmentUnsafeDefReports(
// Augment gadget reports.
for (auto &Report : Reports) {
MCInstReference Location = Report.Issue->Location;
- LLVM_DEBUG({ traceInst(BC, "Attaching leakage info to", Location); });
+ LLVM_DEBUG(traceInst(BC, "Attaching leakage info to", Location));
assert(Report.RequestedDetails &&
"Should be removed by handleSimpleReports");
auto DetailedInfo = std::make_shared<LeakageInfo>(
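(Two behavior notes on the scanner changes above: with the new --auth-traps-on-failure option, every safe-to-dereference register is also trusted, so the dst-safety analysis for authentication oracles is skipped entirely; and the new assertion relies on llvm::BitVector::test(RHS), which returns true iff this vector has a bit set that RHS does not. A minimal standalone sketch of the asserted invariant, not BOLT code, with an illustrative function name:

    #include "llvm/ADT/BitVector.h"

    // True when every trusted register is also safe-to-dereference.
    // BitVector::test(RHS) checks whether *this has any bit set that is
    // not set in RHS, so its negation is exactly the subset property
    // asserted in computeNext() above.
    static bool trustedSubsetOfSafeToDeref(const llvm::BitVector &Trusted,
                                           const llvm::BitVector &SafeToDeref) {
      return !Trusted.test(SafeToDeref);
    }
)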
diff --git a/bolt/lib/Rewrite/CMakeLists.txt b/bolt/lib/Rewrite/CMakeLists.txt
index 7750360..5b15edc 100644
--- a/bolt/lib/Rewrite/CMakeLists.txt
+++ b/bolt/lib/Rewrite/CMakeLists.txt
@@ -25,6 +25,7 @@ add_llvm_library(LLVMBOLTRewrite
PseudoProbeRewriter.cpp
RewriteInstance.cpp
SDTRewriter.cpp
+ GNUPropertyRewriter.cpp
NO_EXPORT
DISABLE_LLVM_LINK_LLVM_DYLIB
diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp
index 5c89a42..7366d2a 100644
--- a/bolt/lib/Rewrite/DWARFRewriter.cpp
+++ b/bolt/lib/Rewrite/DWARFRewriter.cpp
@@ -1853,7 +1853,7 @@ void DWARFRewriter::writeDWOFiles(
else if (!sys::fs::exists(CompDir))
CompDir = ".";
// Prevent failures when DWOName is already an absolute path.
- sys::fs::make_absolute(CompDir, AbsolutePath);
+ sys::path::make_absolute(CompDir, AbsolutePath);
std::error_code EC;
std::unique_ptr<ToolOutputFile> TempOut =
diff --git a/bolt/lib/Rewrite/GNUPropertyRewriter.cpp b/bolt/lib/Rewrite/GNUPropertyRewriter.cpp
new file mode 100644
index 0000000..f61c08e
--- /dev/null
+++ b/bolt/lib/Rewrite/GNUPropertyRewriter.cpp
@@ -0,0 +1,147 @@
+//===- bolt/Rewrite/GNUPropertyRewriter.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Read the .note.gnu.property section.
+//
+//===----------------------------------------------------------------------===//
+
+#include "bolt/Rewrite/MetadataRewriter.h"
+#include "bolt/Rewrite/MetadataRewriters.h"
+#include "llvm/Support/Errc.h"
+
+using namespace llvm;
+using namespace bolt;
+
+namespace {
+
+class GNUPropertyRewriter final : public MetadataRewriter {
+
+ Expected<uint32_t> decodeGNUPropertyNote(StringRef Desc);
+
+public:
+ GNUPropertyRewriter(StringRef Name, BinaryContext &BC)
+ : MetadataRewriter(Name, BC) {}
+
+ Error sectionInitializer() override;
+};
+
+Error GNUPropertyRewriter::sectionInitializer() {
+
+ ErrorOr<BinarySection &> Sec =
+ BC.getUniqueSectionByName(".note.gnu.property");
+ if (!Sec)
+ return Error::success();
+
+ // Accumulate feature bits
+ uint32_t FeaturesAcc = 0;
+
+ StringRef Buf = Sec->getContents();
+ DataExtractor DE(Buf, BC.AsmInfo->isLittleEndian(),
+ BC.AsmInfo->getCodePointerSize());
+ DataExtractor::Cursor Cursor(0);
+ while (Cursor && !DE.eof(Cursor)) {
+ const uint32_t NameSz = DE.getU32(Cursor);
+ const uint32_t DescSz = DE.getU32(Cursor);
+ const uint32_t Type = DE.getU32(Cursor);
+
+ StringRef Name =
+ NameSz ? Buf.slice(Cursor.tell(), Cursor.tell() + NameSz) : "<empty>";
+ Cursor.seek(alignTo(Cursor.tell() + NameSz, 4));
+
+ const uint64_t DescOffset = Cursor.tell();
+ StringRef Desc =
+ DescSz ? Buf.slice(DescOffset, DescOffset + DescSz) : "<empty>";
+ Cursor.seek(alignTo(DescOffset + DescSz, 4));
+ if (!Cursor)
+ return createStringError(
+ errc::executable_format_error,
+ "out of bounds while reading .note.gnu.property section: %s",
+ toString(Cursor.takeError()).c_str());
+
+ if (Type == ELF::NT_GNU_PROPERTY_TYPE_0 && Name.starts_with("GNU") &&
+ DescSz) {
+ auto Features = decodeGNUPropertyNote(Desc);
+ if (!Features)
+ return Features.takeError();
+ FeaturesAcc |= *Features;
+ }
+ }
+
+ if (BC.isAArch64()) {
+ BC.setUsesBTI(FeaturesAcc & llvm::ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI);
+ if (BC.usesBTI())
+ BC.outs() << "BOLT-WARNING: binary is using BTI. Optimized binary may be "
+ "corrupted\n";
+ }
+
+ return Error::success();
+}
+
+/// \p Desc contains an array of property descriptors. Each member has the
+/// following structure:
+/// typedef struct {
+/// Elf_Word pr_type;
+/// Elf_Word pr_datasz;
+/// unsigned char pr_data[PR_DATASZ];
+/// unsigned char pr_padding[PR_PADDING];
+/// } Elf_Prop;
+///
+/// As there is no guarantee which element of the array encodes the features,
+/// we have to read all of them and OR the results together.
+Expected<uint32_t> GNUPropertyRewriter::decodeGNUPropertyNote(StringRef Desc) {
+ DataExtractor DE(Desc, BC.AsmInfo->isLittleEndian(),
+ BC.AsmInfo->getCodePointerSize());
+ DataExtractor::Cursor Cursor(0);
+ const uint32_t Align = DE.getAddressSize();
+
+  std::optional<uint32_t> Features;
+ while (Cursor && !DE.eof(Cursor)) {
+ const uint32_t PrType = DE.getU32(Cursor);
+ const uint32_t PrDataSz = DE.getU32(Cursor);
+
+ const uint64_t PrDataStart = Cursor.tell();
+ const uint64_t PrDataEnd = PrDataStart + PrDataSz;
+ Cursor.seek(PrDataEnd);
+ if (!Cursor)
+ return createStringError(
+ errc::executable_format_error,
+ "out of bounds while reading .note.gnu.property section: %s",
+ toString(Cursor.takeError()).c_str());
+
+ if (PrType == llvm::ELF::GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
+ if (PrDataSz != 4) {
+ return createStringError(
+ errc::executable_format_error,
+ "Property descriptor size has to be 4 bytes on AArch64\n");
+ }
+ DataExtractor::Cursor Tmp(PrDataStart);
+ // PrDataSz = 4 -> PrData is uint32_t
+ const uint32_t FeaturesItem = DE.getU32(Tmp);
+ if (!Tmp)
+ return createStringError(
+ errc::executable_format_error,
+ "failed to read property from .note.gnu.property section: %s",
+ toString(Tmp.takeError()).c_str());
+ Features = Features ? (*Features | FeaturesItem) : FeaturesItem;
+ }
+
+ Cursor.seek(alignTo(PrDataEnd, Align));
+ if (!Cursor)
+ return createStringError(errc::executable_format_error,
+ "out of bounds while reading property array in "
+ ".note.gnu.property section: %s",
+ toString(Cursor.takeError()).c_str());
+ }
+ return Features.value_or(0u);
+}
+} // namespace
+
+std::unique_ptr<MetadataRewriter>
+llvm::bolt::createGNUPropertyRewriter(BinaryContext &BC) {
+ return std::make_unique<GNUPropertyRewriter>("gnu-property-rewriter", BC);
+}
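The rewriter walks the same layout at two levels: sectionInitializer() iterates ELF note headers (namesz, descsz, type, with name and desc padded to 4 bytes), and decodeGNUPropertyNote() iterates the Elf_Prop array inside a note's desc (entries padded to the address size, 8 on ELF64). A standalone sketch of the inner loop in plain C++, with the ELF constant spelled out and little-endian input assumed:

  #include <cstdint>
  #include <cstring>
  #include <vector>

  constexpr uint32_t GNU_PROPERTY_AARCH64_FEATURE_1_AND = 0xc0000000;

  static uint32_t readU32(const uint8_t *P) {
    uint32_t V;
    std::memcpy(&V, P, sizeof(V)); // little-endian input assumed
    return V;
  }

  // OR together every AArch64 feature bitmask found in one note's desc.
  uint32_t decodeDesc(const std::vector<uint8_t> &Desc) {
    uint32_t Features = 0;
    const size_t Align = 8; // pointer size on ELF64
    size_t Off = 0;
    while (Off + 8 <= Desc.size()) {
      const uint32_t PrType = readU32(&Desc[Off]);
      const uint32_t PrDataSz = readU32(&Desc[Off + 4]);
      const size_t DataOff = Off + 8;
      if (DataOff + PrDataSz > Desc.size())
        break; // malformed; the real code reports an error instead
      if (PrType == GNU_PROPERTY_AARCH64_FEATURE_1_AND && PrDataSz == 4)
        Features |= readU32(&Desc[DataOff]);
      Off = (DataOff + PrDataSz + Align - 1) & ~(Align - 1);
    }
    return Features;
  }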
diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp
index 8b78c53..8a25d0b 100644
--- a/bolt/lib/Rewrite/RewriteInstance.cpp
+++ b/bolt/lib/Rewrite/RewriteInstance.cpp
@@ -2274,8 +2274,7 @@ uint32_t getRelocationSymbol(const ELFObjectFileBase *Obj,
bool RewriteInstance::analyzeRelocation(
const RelocationRef &Rel, uint32_t &RType, std::string &SymbolName,
bool &IsSectionRelocation, uint64_t &SymbolAddress, int64_t &Addend,
- uint64_t &ExtractedValue, bool &Skip) const {
- Skip = false;
+ uint64_t &ExtractedValue) const {
if (!Relocation::isSupported(RType))
return false;
@@ -2707,9 +2706,8 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection,
int64_t Addend;
uint64_t ExtractedValue;
bool IsSectionRelocation;
- bool Skip;
if (!analyzeRelocation(Rel, RType, SymbolName, IsSectionRelocation,
- SymbolAddress, Addend, ExtractedValue, Skip)) {
+ SymbolAddress, Addend, ExtractedValue)) {
LLVM_DEBUG({
dbgs() << "BOLT-WARNING: failed to analyze relocation @ offset = "
<< formatv("{0:x}; type name = {1}\n", Rel.getOffset(), TypeName);
@@ -2718,14 +2716,6 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection,
return;
}
- if (Skip) {
- LLVM_DEBUG({
- dbgs() << "BOLT-DEBUG: skipping relocation @ offset = "
- << formatv("{0:x}; type name = {1}\n", Rel.getOffset(), TypeName);
- });
- return;
- }
-
if (!IsFromCode && !IsWritable && (IsX86 || IsAArch64) &&
Relocation::isPCRelative(RType)) {
BinaryData *BD = BC->getBinaryDataContainingAddress(Rel.getOffset());
@@ -3341,6 +3331,8 @@ void RewriteInstance::initializeMetadataManager() {
MetadataManager.registerRewriter(createPseudoProbeRewriter(*BC));
MetadataManager.registerRewriter(createSDTRewriter(*BC));
+
+ MetadataManager.registerRewriter(createGNUPropertyRewriter(*BC));
}
void RewriteInstance::processSectionMetadata() {
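The Skip out-parameter deleted above existed only so analyzeRelocation() could flag marker relocations to ignore; judging by the Relocation.cpp hunk in this commit and the tls-desc-call.s test below, R_AARCH64_TLSDESC_CALL is now rejected earlier (presumably via Relocation::isSupported()), so analyzeRelocation() simply returns false and the flag becomes dead weight. An illustrative predicate of the kind doing that filtering (hypothetical helper; the real logic lives in bolt/lib/Core/Relocation.cpp):

  #include <cstdint>

  // R_AARCH64_TLSDESC_CALL only annotates the blr for linker relaxation;
  // it patches no bits, so there is nothing for BOLT to process.
  static bool isMarkerReloc(uint32_t RType) {
    return RType == 569; // ELF::R_AARCH64_TLSDESC_CALL
  }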
diff --git a/bolt/test/AArch64/Inputs/property-note-bti.yaml b/bolt/test/AArch64/Inputs/property-note-bti.yaml
new file mode 100644
index 0000000..541ae92
--- /dev/null
+++ b/bolt/test/AArch64/Inputs/property-note-bti.yaml
@@ -0,0 +1,50 @@
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_AARCH64
+ Entry: 0x400510
+ProgramHeaders:
+ - Type: PT_NOTE
+ Flags: [ PF_R ]
+ FirstSec: .note.gnu.property
+ LastSec: .note.gnu.property
+ VAddr: 0x400338
+ Align: 0x8
+ - Type: PT_LOAD
+ Flags: [ PF_R ]
+ VAddr: 0x0
+ Align: 0x10000
+ FileSize: 0xf8
+ MemSize: 0xf8
+ Offset: 0x0
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x2a0000
+ AddressAlign: 0x4
+ Content: 400580d2c0035fd6
+ - Name: .note.gnu.property
+ Type: SHT_NOTE
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400338
+ AddressAlign: 0x8
+ Notes:
+ - Name: GNU
+ Desc: 000000C0040000000300000000000000
+ Type: NT_GNU_PROPERTY_TYPE_0
+ - Type: SectionHeaderTable
+ Sections:
+ - Name: .note.gnu.property
+ - Name: .symtab
+ - Name: .strtab
+ - Name: .shstrtab
+ - Name: .text
+Symbols:
+ - Name: .note.gnu.property
+ Type: STT_SECTION
+ Section: .note.gnu.property
+ Value: 0x400338
+...
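The 16-byte Desc blob above is a single little-endian Elf_Prop entry; decoded field by field:

  000000C0  pr_type    = 0xc0000000  GNU_PROPERTY_AARCH64_FEATURE_1_AND
  04000000  pr_datasz  = 4
  03000000  pr_data    = 0x3         bit 0 = BTI, bit 1 = PAC
  00000000  pr_padding               pads the entry to 8-byte alignment

The no-BTI input that follows is identical except pr_data = 0x2 (PAC only), so the BTI bit is clear.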
diff --git a/bolt/test/AArch64/Inputs/property-note-nobti.yaml b/bolt/test/AArch64/Inputs/property-note-nobti.yaml
new file mode 100644
index 0000000..a041a58
--- /dev/null
+++ b/bolt/test/AArch64/Inputs/property-note-nobti.yaml
@@ -0,0 +1,50 @@
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_AARCH64
+ Entry: 0x400510
+ProgramHeaders:
+ - Type: PT_NOTE
+ Flags: [ PF_R ]
+ FirstSec: .note.gnu.property
+ LastSec: .note.gnu.property
+ VAddr: 0x400338
+ Align: 0x8
+ - Type: PT_LOAD
+ Flags: [ PF_R ]
+ VAddr: 0x0
+ Align: 0x10000
+ FileSize: 0xf8
+ MemSize: 0xf8
+ Offset: 0x0
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x2a0000
+ AddressAlign: 0x4
+ Content: 400580d2c0035fd6
+ - Name: .note.gnu.property
+ Type: SHT_NOTE
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400338
+ AddressAlign: 0x8
+ Notes:
+ - Name: GNU
+ Desc: 000000C0040000000200000000000000
+ Type: NT_GNU_PROPERTY_TYPE_0
+ - Type: SectionHeaderTable
+ Sections:
+ - Name: .note.gnu.property
+ - Name: .symtab
+ - Name: .strtab
+ - Name: .shstrtab
+ - Name: .text
+Symbols:
+ - Name: .note.gnu.property
+ Type: STT_SECTION
+ Section: .note.gnu.property
+ Value: 0x400338
+...
diff --git a/bolt/test/AArch64/bti-note.test b/bolt/test/AArch64/bti-note.test
new file mode 100644
index 0000000..1ec9d77
--- /dev/null
+++ b/bolt/test/AArch64/bti-note.test
@@ -0,0 +1,10 @@
+// This test checks that the GNUPropertyRewriter can decode the BTI feature flag.
+// It decodes an executable with BTI, and checks for the warning.
+
+RUN: yaml2obj %p/Inputs/property-note-bti.yaml &> %t.exe
+
+RUN: llvm-readelf -n %t.exe | FileCheck %s
+CHECK: BTI
+
+RUN: llvm-bolt %t.exe -o %t.exe.bolt | FileCheck %s -check-prefix=CHECK-BOLT
+CHECK-BOLT: BOLT-WARNING: binary is using BTI. Optimized binary may be corrupted
diff --git a/bolt/test/AArch64/no-bti-note.test b/bolt/test/AArch64/no-bti-note.test
new file mode 100644
index 0000000..28cce34
--- /dev/null
+++ b/bolt/test/AArch64/no-bti-note.test
@@ -0,0 +1,10 @@
+// This test checks that the GNUPropertyRewriter can decode the BTI feature flag.
+// It decodes an executable without BTI, and checks that no warning is emitted.
+
+RUN: yaml2obj %p/Inputs/property-note-nobti.yaml &> %t.exe
+
+RUN: llvm-readelf -n %t.exe | FileCheck %s
+CHECK-NOT: BTI
+
+RUN: llvm-bolt %t.exe -o %t.exe.bolt | FileCheck %s -check-prefix=CHECK-BOLT
+CHECK-BOLT-NOT: BOLT-WARNING: binary is using BTI. Optimized binary may be corrupted
diff --git a/bolt/test/AArch64/tls-desc-call.s b/bolt/test/AArch64/tls-desc-call.s
new file mode 100644
index 0000000..0575380
--- /dev/null
+++ b/bolt/test/AArch64/tls-desc-call.s
@@ -0,0 +1,35 @@
+# RUN: %clang %cflags %s -o %t.so -fPIC -shared -Wl,-q
+# RUN: llvm-bolt %t.so -o %t.bolt --debug-only=bolt 2>&1 | FileCheck %s
+
+# REQUIRES: asserts
+
+## Verify that R_AARCH64_TLSDESC_CALL relocations are ignored
+
+# CHECK-NOT: Relocation {{.*}} R_AARCH64_TLSDESC_CALL
+
+ .text
+ .globl get_tls_var
+ .p2align 2
+ .type get_tls_var,@function
+get_tls_var:
+ .cfi_startproc
+ str x30, [sp, #-16]!
+ adrp x0, :tlsdesc:tls_var
+ ldr x1, [x0, :tlsdesc_lo12:tls_var]
+ add x0, x0, :tlsdesc_lo12:tls_var
+ .tlsdesccall tls_var
+ blr x1
+ mrs x8, TPIDR_EL0
+ ldr w0, [x8, x0]
+ ldr x30, [sp], #16
+ ret
+ .size get_tls_var, .-get_tls_var
+ .cfi_endproc
+
+ .type tls_var,@object
+ .section .tdata,"awT",@progbits
+ .globl tls_var
+ .p2align 2, 0x0
+tls_var:
+ .word 42
+ .size tls_var, 4
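The .tlsdesccall directive and its zero-bit R_AARCH64_TLSDESC_CALL relocation only label the blr so the linker may relax the descriptor sequence, which is why BOLT can safely ignore the relocation. A plausible source-level equivalent of get_tls_var (an illustration, not taken from the commit):

  // Built with -fPIC into a shared object, this lowers to the
  // adrp/ldr/add/.tlsdesccall/blr sequence above under the TLS
  // descriptor model.
  thread_local int tls_var = 42;

  int get_tls_var() { return tls_var; }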
diff --git a/bolt/test/binary-analysis/AArch64/cmdline-args.test b/bolt/test/binary-analysis/AArch64/cmdline-args.test
index 3e70b2c..9660ad3 100644
--- a/bolt/test/binary-analysis/AArch64/cmdline-args.test
+++ b/bolt/test/binary-analysis/AArch64/cmdline-args.test
@@ -33,6 +33,7 @@ HELP-NEXT: OPTIONS:
HELP-EMPTY:
HELP-NEXT: BinaryAnalysis options:
HELP-EMPTY:
+HELP-NEXT: --auth-traps-on-failure - Assume authentication instructions always trap on failure
HELP-NEXT: --scanners=<value> - which gadget scanners to run
HELP-NEXT: =pacret - pac-ret: return address protection (subset of "pauth")
HELP-NEXT: =pauth - All Pointer Authentication scanners
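The new option models FEAT_FPAC-like cores, where a failed authentication traps immediately instead of producing an invalid pointer: authentication-oracle reports are suppressed (nothing leaks past a trap) and freshly authenticated registers are immediately trusted, as the FPAC check lines in the tests below show. A typical invocation, mirroring the RUN lines:

  llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure a.out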
diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s b/bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s
index f44ba21..9f580b6 100644
--- a/bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s
+++ b/bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s
@@ -1,6 +1,7 @@
// RUN: %clang %cflags -march=armv8.3-a %s -o %t.exe
-// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s
-// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s
+// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure %t.exe 2>&1 | FileCheck -check-prefix=FPAC %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s
// The detection of compiler-generated explicit pointer checks is tested in
// gs-pauth-address-checks.s, for that reason only test here "dummy-load" and
@@ -8,6 +9,7 @@
// detected per-instruction and per-BB.
// PACRET-NOT: authentication oracle found in function
+// FPAC-NOT: authentication oracle found in function
.text
diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-calls.s b/bolt/test/binary-analysis/AArch64/gs-pauth-calls.s
index fb0bc7c..5e88e10 100644
--- a/bolt/test/binary-analysis/AArch64/gs-pauth-calls.s
+++ b/bolt/test/binary-analysis/AArch64/gs-pauth-calls.s
@@ -1,6 +1,7 @@
// RUN: %clang %cflags -march=armv8.3-a %s -o %t.exe
-// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s
-// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s
+// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure %t.exe 2>&1 | FileCheck %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s
// PACRET-NOT: non-protected call found in function
diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s b/bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s
index b1cec7f..a3ad7ef 100644
--- a/bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s
+++ b/bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s
@@ -1,10 +1,14 @@
// REQUIRES: asserts
//
// RUN: %clang %cflags -march=armv8.3-a %s -o %t.exe
-// RUN: llvm-bolt-binary-analysis --scanners=pacret -no-threads \
-// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck %s
-// RUN: llvm-bolt-binary-analysis --scanners=pauth -no-threads \
-// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,PAUTH %s
+// RUN: llvm-bolt-binary-analysis --scanners=pacret --no-threads \
+// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,NOFPAC %s
+// RUN: llvm-bolt-binary-analysis --scanners=pacret --no-threads --auth-traps-on-failure \
+// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,FPAC %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth --no-threads \
+// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,NOFPAC,AUTH-ORACLES,PAUTH %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth --no-threads --auth-traps-on-failure \
+// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,FPAC,PAUTH %s
// Check the debug output generated by the PAuth gadget scanner to make sure
// that the output is kept meaningful and to provide an overview of what happens
@@ -61,30 +65,54 @@ simple:
// CHECK-NEXT: State 1: src-state<empty>
// CHECK-NEXT: State 2: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
// CHECK-NEXT: merged state: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: DataflowSrcSafetyAnalysis::Confluence(
-// CHECK-NEXT: State 1: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >
-// CHECK-NEXT: State 2: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: merged state: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: DataflowSrcSafetyAnalysis::Confluence(
+// NOFPAC-NEXT: State 1: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >
+// NOFPAC-NEXT: State 2: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: merged state: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
+// NOFPAC-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: W0 X0 W0_HI , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: W0 X0 W0_HI , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: LR W30 W30_HI , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: LR W30 W30_HI , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: LR W30 W30_HI , Insts: >)
+// FPAC-NEXT: DataflowSrcSafetyAnalysis::Confluence(
+// FPAC-NEXT: State 1: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >
+// FPAC-NEXT: State 2: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: merged state: src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: W0 X0 W0_HI , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state<SafeToDerefRegs: W0 X0 W0_HI , TrustedRegs: W0 X0 W0_HI , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state<SafeToDerefRegs: , TrustedRegs: , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: LR W30 W30_HI , Insts: >)
+// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: LR W30 W30_HI , Insts: >)
+// FPAC-NEXT: .. result: (src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: LR W30 W30_HI , Insts: >)
// CHECK-NEXT: After src register safety analysis:
// CHECK-NEXT: Binary Function "simple" {
// CHECK-NEXT: Number : 1
@@ -149,9 +177,9 @@ clobber:
// CHECK-EMPTY:
// CHECK-NEXT: Running detailed src register safety analysis...
// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( mov w30, #0x0, src-state<SafeToDerefRegs: LR W30 W30_HI , TrustedRegs: LR W30 W30_HI , Insts: [0]()>)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: W30_HI , TrustedRegs: W30_HI , Insts: [0](0x{{[0-9a-f]+}} )>)
-// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state<SafeToDerefRegs: W30_HI , TrustedRegs: W30_HI , Insts: [0](0x{{[0-9a-f]+}} )>)
-// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: W30_HI , TrustedRegs: W30_HI , Insts: [0](0x{{[0-9a-f]+}} )>)
+// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: W30_HI , TrustedRegs: W30_HI , Insts: [0](0x{{[0-9a-f]+}})>)
+// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state<SafeToDerefRegs: W30_HI , TrustedRegs: W30_HI , Insts: [0](0x{{[0-9a-f]+}})>)
+// CHECK-NEXT: .. result: (src-state<SafeToDerefRegs: W30_HI , TrustedRegs: W30_HI , Insts: [0](0x{{[0-9a-f]+}})>)
// CHECK-NEXT: After detailed src register safety analysis:
// CHECK-NEXT: Binary Function "clobber" {
// ...
@@ -161,7 +189,7 @@ clobber:
// Iterating over the reports and attaching clobbering info:
// CHECK-EMPTY:
-// CHECK-NEXT: Attaching clobbering info to: 00000000: ret # DataflowSrcSafetyAnalysis: src-state<SafeToDerefRegs: BitVector, TrustedRegs: BitVector, Insts: [0](0x{{[0-9a-f]+}} )>
+// CHECK-NEXT: Attaching clobbering info to: 00000000: ret # DataflowSrcSafetyAnalysis: src-state<SafeToDerefRegs: BitVector, TrustedRegs: BitVector, Insts: [0](0x{{[0-9a-f]+}})>
.globl nocfg
.type nocfg,@function
@@ -255,53 +283,56 @@ auth_oracle:
// ...
// CHECK: End of Function "auth_oracle"
// ...
-// PAUTH: Running dst register safety analysis...
-// PAUTH-NEXT: DstSafetyAnalysis::ComputeNext( ret x30, dst-state<CannotEscapeUnchecked: , Insts: >)
-// PAUTH-NEXT: .. result: (dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: >)
-// PAUTH-NEXT: DstSafetyAnalysis::ComputeNext( autia x0, x1, dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: >)
-// PAUTH-NEXT: .. result: (dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: >)
-// PAUTH-NEXT: After dst register safety analysis:
-// PAUTH-NEXT: Binary Function "auth_oracle" {
-// PAUTH-NEXT: Number : 4
-// PAUTH-NEXT: State : CFG constructed
+// FPAC-NOT: Running dst register safety analysis
+// FPAC-NOT: DstSafetyAnalysis::ComputeNext
+// FPAC-NOT: {{.*dst-state.*}}
+// AUTH-ORACLES: Running dst register safety analysis...
+// AUTH-ORACLES-NEXT: DstSafetyAnalysis::ComputeNext( ret x30, dst-state<CannotEscapeUnchecked: , Insts: >)
+// AUTH-ORACLES-NEXT: .. result: (dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: >)
+// AUTH-ORACLES-NEXT: DstSafetyAnalysis::ComputeNext( autia x0, x1, dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: >)
+// AUTH-ORACLES-NEXT: .. result: (dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: >)
+// AUTH-ORACLES-NEXT: After dst register safety analysis:
+// AUTH-ORACLES-NEXT: Binary Function "auth_oracle" {
+// AUTH-ORACLES-NEXT: Number : 4
+// AUTH-ORACLES-NEXT: State : CFG constructed
// ...
-// PAUTH: BB Layout : [[BB0]]
-// PAUTH-NEXT: }
-// PAUTH-NEXT: [[BB0]] (2 instructions, align : 1)
-// PAUTH-NEXT: Entry Point
-// PAUTH-NEXT: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: >
-// PAUTH-NEXT: 00000004: ret # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: >
-// PAUTH-EMPTY:
-// PAUTH-NEXT: DWARF CFI Instructions:
-// PAUTH-NEXT: <empty>
-// PAUTH-NEXT: End of Function "auth_oracle"
-// PAUTH-EMPTY:
-// PAUTH-NEXT: Found auth inst: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: >
-// PAUTH-NEXT: Authenticated reg: X0
-// PAUTH-NEXT: safe output registers: LR W30 W30_HI{{[ \t]*$}}
-// PAUTH-EMPTY:
-// PAUTH-NEXT: Running detailed dst register safety analysis...
-// PAUTH-NEXT: DstSafetyAnalysis::ComputeNext( ret x30, dst-state<CannotEscapeUnchecked: , Insts: [0]()>)
-// PAUTH-NEXT: .. result: (dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: [0]()>)
-// PAUTH-NEXT: DstSafetyAnalysis::ComputeNext( autia x0, x1, dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: [0]()>)
-// PAUTH-NEXT: .. result: (dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: [0](0x{{[0-9a-f]+}} )>)
-// PAUTH-NEXT: After detailed dst register safety analysis:
-// PAUTH-NEXT: Binary Function "auth_oracle" {
-// PAUTH-NEXT: Number : 4
-// PAUTH-NEXT: State : CFG constructed
+// AUTH-ORACLES: BB Layout : [[BB0]]
+// AUTH-ORACLES-NEXT: }
+// AUTH-ORACLES-NEXT: [[BB0]] (2 instructions, align : 1)
+// AUTH-ORACLES-NEXT: Entry Point
+// AUTH-ORACLES-NEXT: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: >
+// AUTH-ORACLES-NEXT: 00000004: ret # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: >
+// AUTH-ORACLES-EMPTY:
+// AUTH-ORACLES-NEXT: DWARF CFI Instructions:
+// AUTH-ORACLES-NEXT: <empty>
+// AUTH-ORACLES-NEXT: End of Function "auth_oracle"
+// AUTH-ORACLES-EMPTY:
+// AUTH-ORACLES-NEXT: Found auth inst: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: >
+// AUTH-ORACLES-NEXT: Authenticated reg: X0
+// AUTH-ORACLES-NEXT: safe output registers: LR W30 W30_HI{{[ \t]*$}}
+// AUTH-ORACLES-EMPTY:
+// AUTH-ORACLES-NEXT: Running detailed dst register safety analysis...
+// AUTH-ORACLES-NEXT: DstSafetyAnalysis::ComputeNext( ret x30, dst-state<CannotEscapeUnchecked: , Insts: [0]()>)
+// AUTH-ORACLES-NEXT: .. result: (dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: [0]()>)
+// AUTH-ORACLES-NEXT: DstSafetyAnalysis::ComputeNext( autia x0, x1, dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: [0]()>)
+// AUTH-ORACLES-NEXT: .. result: (dst-state<CannotEscapeUnchecked: LR W30 W30_HI , Insts: [0](0x{{[0-9a-f]+}})>)
+// AUTH-ORACLES-NEXT: After detailed dst register safety analysis:
+// AUTH-ORACLES-NEXT: Binary Function "auth_oracle" {
+// AUTH-ORACLES-NEXT: Number : 4
+// AUTH-ORACLES-NEXT: State : CFG constructed
// ...
-// PAUTH: BB Layout : [[BB0]]
-// PAUTH-NEXT: }
-// PAUTH-NEXT: [[BB0]] (2 instructions, align : 1)
-// PAUTH-NEXT: Entry Point
-// PAUTH-NEXT: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: [0](0x{{[0-9a-f]+}} )>
-// PAUTH-NEXT: 00000004: ret # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: [0]()>
-// PAUTH-EMPTY:
-// PAUTH-NEXT: DWARF CFI Instructions:
-// PAUTH-NEXT: <empty>
-// PAUTH-NEXT: End of Function "auth_oracle"
-// PAUTH-EMPTY:
-// PAUTH-NEXT: Attaching leakage info to: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: [0](0x{{[0-9a-f]+}} )>
+// AUTH-ORACLES: BB Layout : [[BB0]]
+// AUTH-ORACLES-NEXT: }
+// AUTH-ORACLES-NEXT: [[BB0]] (2 instructions, align : 1)
+// AUTH-ORACLES-NEXT: Entry Point
+// AUTH-ORACLES-NEXT: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: [0](0x{{[0-9a-f]+}})>
+// AUTH-ORACLES-NEXT: 00000004: ret # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: [0]()>
+// AUTH-ORACLES-EMPTY:
+// AUTH-ORACLES-NEXT: DWARF CFI Instructions:
+// AUTH-ORACLES-NEXT: <empty>
+// AUTH-ORACLES-NEXT: End of Function "auth_oracle"
+// AUTH-ORACLES-EMPTY:
+// AUTH-ORACLES-NEXT: Attaching leakage info to: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state<CannotEscapeUnchecked: BitVector, Insts: [0](0x{{[0-9a-f]+}})>
// Gadget scanner should not crash on CFI instructions, including when debug-printing them.
// Note that the particular debug output is not checked, but BOLT should be
diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s b/bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s
index 4d4bb7b..7d908f2 100644
--- a/bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s
+++ b/bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s
@@ -1,6 +1,7 @@
// RUN: %clang %cflags -march=armv8.3-a+pauth-lr -Wl,--no-relax %s -o %t.exe
-// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s
-// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s
+// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,NOFPAC %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,FPAC %s
// The detection of compiler-generated explicit pointer checks is tested in
// gs-pauth-address-checks.s, for that reason only test here "dummy-load" and
@@ -66,9 +67,10 @@ good_sign_auted_checked_brk:
.globl bad_sign_authed_unchecked
.type bad_sign_authed_unchecked,@function
bad_sign_authed_unchecked:
-// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// FPAC-NOT: bad_sign_authed_unchecked
+// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
autda x0, x2
pacda x0, x1
ret
@@ -266,9 +268,10 @@ bad_call_between_checked_and_used:
.globl bad_transition_check_then_auth
.type bad_transition_check_then_auth,@function
bad_transition_check_then_auth:
-// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_transition_check_then_auth, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// FPAC-NOT: bad_transition_check_then_auth
+// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_transition_check_then_auth, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
ldr x2, [x0]
autda x0, x2
pacda x0, x1
@@ -278,9 +281,10 @@ bad_transition_check_then_auth:
.globl bad_transition_auth_then_auth
.type bad_transition_auth_then_auth,@function
bad_transition_auth_then_auth:
-// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_transition_auth_then_auth, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// FPAC-NOT: bad_transition_auth_then_auth
+// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_transition_auth_then_auth, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
autda x0, x2
autda x0, x2
pacda x0, x1
@@ -363,9 +367,10 @@ good_sign_auted_checked_brk_multi_bb:
.globl bad_sign_authed_unchecked_multi_bb
.type bad_sign_authed_unchecked_multi_bb,@function
bad_sign_authed_unchecked_multi_bb:
-// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked_multi_bb, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// FPAC-NOT: bad_sign_authed_unchecked_multi_bb
+// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked_multi_bb, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
autda x0, x2
cbz x3, 1f
ldr x2, [x0]
@@ -534,9 +539,10 @@ good_sign_auted_checked_ldr_nocfg:
.globl bad_sign_authed_unchecked_nocfg
.type bad_sign_authed_unchecked_nocfg,@function
bad_sign_authed_unchecked_nocfg:
-// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked_nocfg, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// FPAC-NOT: bad_sign_authed_unchecked_nocfg
+// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked_nocfg, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
adr x3, 1f
br x3
1:
@@ -640,9 +646,10 @@ bad_clobber_between_checked_and_used_nocfg:
.globl bad_transition_check_then_auth_nocfg
.type bad_transition_check_then_auth_nocfg,@function
bad_transition_check_then_auth_nocfg:
-// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_transition_check_then_auth_nocfg, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// FPAC-NOT: bad_transition_check_then_auth_nocfg
+// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_transition_check_then_auth_nocfg, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
adr x3, 1f
br x3
1:
@@ -655,9 +662,10 @@ bad_transition_check_then_auth_nocfg:
.globl bad_transition_auth_then_auth_nocfg
.type bad_transition_auth_then_auth_nocfg,@function
bad_transition_auth_then_auth_nocfg:
-// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_transition_auth_then_auth_nocfg, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// FPAC-NOT: bad_transition_auth_then_auth_nocfg
+// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_transition_auth_then_auth_nocfg, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
adr x3, 1f
br x3
1:
diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s b/bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s
index 2d3c2f1..59b7d92 100644
--- a/bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s
+++ b/bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s
@@ -1,6 +1,7 @@
// RUN: %clang %cflags -Wl,--entry=_custom_start -march=armv8.3-a %s -o %t.exe
-// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s
-// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s
+// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,FPAC %s
+// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,NOFPAC %s
// PACRET-NOT: untrusted link register found before tail call
@@ -89,19 +90,20 @@ bad_indirect_tailcall_not_auted:
.globl bad_direct_tailcall_untrusted
.type bad_direct_tailcall_untrusted,@function
bad_direct_tailcall_untrusted:
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 1 instructions that leak the affected registers are:
-// CHECK-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL
-// CHECK-NEXT: This happens in the following basic block:
-// CHECK-NEXT: {{[0-9a-f]+}}: paciasp
-// CHECK-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]!
-// CHECK-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10
-// CHECK-NEXT: {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: {{[0-9a-f]+}}: b callee # TAILCALL
+// FPAC-NOT: bad_direct_tailcall_untrusted
+// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 1 instructions that leak the affected registers are:
+// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL
+// NOFPAC-NEXT: This happens in the following basic block:
+// NOFPAC-NEXT: {{[0-9a-f]+}}: paciasp
+// NOFPAC-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]!
+// NOFPAC-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10
+// NOFPAC-NEXT: {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: {{[0-9a-f]+}}: b callee # TAILCALL
paciasp
stp x29, x30, [sp, #-0x10]!
ldp x29, x30, [sp], #0x10
@@ -114,19 +116,20 @@ bad_direct_tailcall_untrusted:
bad_plt_tailcall_untrusted:
// FIXME: Calls via PLT are disassembled incorrectly. Nevertheless, they are
// still detected as tail calls.
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_plt_tailcall_untrusted, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_plt_tailcall_untrusted, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 1 instructions that leak the affected registers are:
-// CHECK-NEXT: 1. {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL
-// CHECK-NEXT: This happens in the following basic block:
-// CHECK-NEXT: {{[0-9a-f]+}}: paciasp
-// CHECK-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]!
-// CHECK-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10
-// CHECK-NEXT: {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL
+// FPAC-NOT: bad_plt_tailcall_untrusted
+// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_plt_tailcall_untrusted, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_plt_tailcall_untrusted, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 1 instructions that leak the affected registers are:
+// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL
+// NOFPAC-NEXT: This happens in the following basic block:
+// NOFPAC-NEXT: {{[0-9a-f]+}}: paciasp
+// NOFPAC-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]!
+// NOFPAC-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10
+// NOFPAC-NEXT: {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL
paciasp
stp x29, x30, [sp, #-0x10]!
ldp x29, x30, [sp], #0x10
@@ -137,20 +140,21 @@ bad_plt_tailcall_untrusted:
.globl bad_indirect_tailcall_untrusted
.type bad_indirect_tailcall_untrusted,@function
bad_indirect_tailcall_untrusted:
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: br x0 # TAILCALL
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 1 instructions that leak the affected registers are:
-// CHECK-NEXT: 1. {{[0-9a-f]+}}: br x0 # TAILCALL
-// CHECK-NEXT: This happens in the following basic block:
-// CHECK-NEXT: {{[0-9a-f]+}}: paciasp
-// CHECK-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]!
-// CHECK-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10
-// CHECK-NEXT: {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: {{[0-9a-f]+}}: autia x0, x1
-// CHECK-NEXT: {{[0-9a-f]+}}: br x0 # TAILCALL
+// FPAC-NOT: bad_indirect_tailcall_untrusted
+// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: br x0 # TAILCALL
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 1 instructions that leak the affected registers are:
+// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: br x0 # TAILCALL
+// NOFPAC-NEXT: This happens in the following basic block:
+// NOFPAC-NEXT: {{[0-9a-f]+}}: paciasp
+// NOFPAC-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]!
+// NOFPAC-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10
+// NOFPAC-NEXT: {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: {{[0-9a-f]+}}: autia x0, x1
+// NOFPAC-NEXT: {{[0-9a-f]+}}: br x0 # TAILCALL
paciasp
stp x29, x30, [sp, #-0x10]!
ldp x29, x30, [sp], #0x10
@@ -251,13 +255,14 @@ bad_indirect_tailcall_not_auted_multi_bb:
.globl bad_direct_tailcall_untrusted_multi_bb
.type bad_direct_tailcall_untrusted_multi_bb,@function
bad_direct_tailcall_untrusted_multi_bb:
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 1 instructions that leak the affected registers are:
-// CHECK-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL
+// FPAC-NOT: bad_direct_tailcall_untrusted_multi_bb
+// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 1 instructions that leak the affected registers are:
+// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL
paciasp
stp x29, x30, [sp, #-0x10]!
ldp x29, x30, [sp], #0x10
@@ -271,12 +276,13 @@ bad_direct_tailcall_untrusted_multi_bb:
.globl bad_indirect_tailcall_untrusted_multi_bb
.type bad_indirect_tailcall_untrusted_multi_bb,@function
bad_indirect_tailcall_untrusted_multi_bb:
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: br x0 # UNKNOWN CONTROL FLOW
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 0 instructions that leak the affected registers are:
+// FPAC-NOT: bad_indirect_tailcall_untrusted_multi_bb
+// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: br x0 # UNKNOWN CONTROL FLOW
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 0 instructions that leak the affected registers are:
paciasp
stp x29, x30, [sp, #-0x10]!
ldp x29, x30, [sp], #0x10
@@ -397,13 +403,14 @@ bad_indirect_tailcall_not_auted_nocfg:
.globl bad_direct_tailcall_untrusted_nocfg
.type bad_direct_tailcall_untrusted_nocfg,@function
bad_direct_tailcall_untrusted_nocfg:
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted_nocfg, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted_nocfg, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 1 instructions that leak the affected registers are:
-// CHECK-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL
+// FPAC-NOT: bad_direct_tailcall_untrusted_nocfg
+// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted_nocfg, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted_nocfg, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 1 instructions that leak the affected registers are:
+// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL
paciasp
stp x29, x30, [sp, #-0x10]!
adr x3, 1f
@@ -419,13 +426,14 @@ bad_direct_tailcall_untrusted_nocfg:
bad_plt_tailcall_untrusted_nocfg:
// FIXME: Calls via PLT are disassembled incorrectly. Nevertheless, they are
// still detected as tail calls.
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_plt_tailcall_untrusted_nocfg, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted_nocfg # TAILCALL
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_plt_tailcall_untrusted_nocfg, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 1 instructions that leak the affected registers are:
-// CHECK-NEXT: 1. {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted_nocfg # TAILCALL
+// FPAC-NOT: bad_plt_tailcall_untrusted_nocfg
+// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_plt_tailcall_untrusted_nocfg, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted_nocfg # TAILCALL
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_plt_tailcall_untrusted_nocfg, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 1 instructions that leak the affected registers are:
+// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted_nocfg # TAILCALL
paciasp
stp x29, x30, [sp, #-0x10]!
adr x3, 1f
@@ -441,11 +449,12 @@ bad_plt_tailcall_untrusted_nocfg:
bad_indirect_tailcall_untrusted_nocfg:
// Known false negative: ignoring UNKNOWN CONTROL FLOW without CFG.
// Authentication oracle is found by a generic checker, though.
-// CHECK-NOT: untrusted link register{{.*}}bad_indirect_tailcall_untrusted_nocfg
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_nocfg, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 0 instructions that leak the affected registers are:
-// CHECK-NOT: untrusted link register{{.*}}bad_indirect_tailcall_untrusted_nocfg
+// FPAC-NOT: bad_indirect_tailcall_untrusted_nocfg
+// NOFPAC-NOT: untrusted link register{{.*}}bad_indirect_tailcall_untrusted_nocfg
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_nocfg, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 0 instructions that leak the affected registers are:
+// NOFPAC-NOT: untrusted link register{{.*}}bad_indirect_tailcall_untrusted_nocfg
paciasp
stp x29, x30, [sp, #-0x10]!
adr x3, 1f
@@ -515,19 +524,20 @@ good_indirect_tailcall_no_clobber_v83:
.globl bad_indirect_tailcall_untrusted_v83
.type bad_indirect_tailcall_untrusted_v83,@function
bad_indirect_tailcall_untrusted_v83:
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted_v83, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: braa x0, x1 # TAILCALL
-// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are:
-// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_v83, basic block {{[^,]+}}, at address
-// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: The 1 instructions that leak the affected registers are:
-// CHECK-NEXT: 1. {{[0-9a-f]+}}: braa x0, x1 # TAILCALL
-// CHECK-NEXT: This happens in the following basic block:
-// CHECK-NEXT: {{[0-9a-f]+}}: paciasp
-// CHECK-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]!
-// CHECK-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10
-// CHECK-NEXT: {{[0-9a-f]+}}: autiasp
-// CHECK-NEXT: {{[0-9a-f]+}}: braa x0, x1 # TAILCALL
+// FPAC-NOT: bad_indirect_tailcall_untrusted_v83
+// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted_v83, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: braa x0, x1 # TAILCALL
+// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are:
+// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_v83, basic block {{[^,]+}}, at address
+// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: The 1 instructions that leak the affected registers are:
+// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: braa x0, x1 # TAILCALL
+// NOFPAC-NEXT: This happens in the following basic block:
+// NOFPAC-NEXT: {{[0-9a-f]+}}: paciasp
+// NOFPAC-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]!
+// NOFPAC-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10
+// NOFPAC-NEXT: {{[0-9a-f]+}}: autiasp
+// NOFPAC-NEXT: {{[0-9a-f]+}}: braa x0, x1 # TAILCALL
paciasp
stp x29, x30, [sp, #-0x10]!
ldp x29, x30, [sp], #0x10
diff --git a/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp b/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp
index b895075..0ac8f71 100644
--- a/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp
+++ b/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp
@@ -142,7 +142,7 @@ groupReplacements(const TUReplacements &TUs, const TUDiagnostics &TUDs,
// build directories, make them absolute immediately.
SmallString<128> Path = R.getFilePath();
if (BuildDir)
- llvm::sys::fs::make_absolute(*BuildDir, Path);
+ llvm::sys::path::make_absolute(*BuildDir, Path);
else
SM.getFileManager().makeAbsolutePath(Path);
diff --git a/clang-tools-extra/clang-include-fixer/IncludeFixer.cpp b/clang-tools-extra/clang-include-fixer/IncludeFixer.cpp
index d2ae13c..e825547 100644
--- a/clang-tools-extra/clang-include-fixer/IncludeFixer.cpp
+++ b/clang-tools-extra/clang-include-fixer/IncludeFixer.cpp
@@ -96,7 +96,7 @@ bool IncludeFixerActionFactory::runInvocation(
// diagnostics here.
Compiler.createDiagnostics(new clang::IgnoringDiagConsumer,
/*ShouldOwnClient=*/true);
- Compiler.createSourceManager(*Files);
+ Compiler.createSourceManager();
// We abort on fatal errors so don't let a large number of errors become
// fatal. A missing #include can cause thousands of errors.
diff --git a/clang-tools-extra/clang-move/Move.cpp b/clang-tools-extra/clang-move/Move.cpp
index 17f5971..519d3599 100644
--- a/clang-tools-extra/clang-move/Move.cpp
+++ b/clang-tools-extra/clang-move/Move.cpp
@@ -75,7 +75,7 @@ std::string MakeAbsolutePath(StringRef CurrentDir, StringRef Path) {
return "";
llvm::SmallString<128> InitialDirectory(CurrentDir);
llvm::SmallString<128> AbsolutePath(Path);
- llvm::sys::fs::make_absolute(InitialDirectory, AbsolutePath);
+ llvm::sys::path::make_absolute(InitialDirectory, AbsolutePath);
return CleanPath(std::move(AbsolutePath));
}
diff --git a/clang-tools-extra/clangd/ConfigCompile.cpp b/clang-tools-extra/clangd/ConfigCompile.cpp
index 962a48b..18e3180 100644
--- a/clang-tools-extra/clangd/ConfigCompile.cpp
+++ b/clang-tools-extra/clangd/ConfigCompile.cpp
@@ -131,7 +131,7 @@ struct FragmentCompiler {
return std::nullopt;
}
llvm::SmallString<256> AbsPath = llvm::StringRef(*Path);
- llvm::sys::fs::make_absolute(FragmentDirectory, AbsPath);
+ llvm::sys::path::make_absolute(FragmentDirectory, AbsPath);
llvm::sys::path::native(AbsPath, Style);
return AbsPath.str().str();
}
diff --git a/clang-tools-extra/clangd/SystemIncludeExtractor.cpp b/clang-tools-extra/clangd/SystemIncludeExtractor.cpp
index 106de1b..4a5cd3b 100644
--- a/clang-tools-extra/clangd/SystemIncludeExtractor.cpp
+++ b/clang-tools-extra/clangd/SystemIncludeExtractor.cpp
@@ -106,7 +106,7 @@ struct DriverArgs {
// relative or absolute).
if (llvm::any_of(Driver,
[](char C) { return llvm::sys::path::is_separator(C); })) {
- llvm::sys::fs::make_absolute(Cmd.Directory, Driver);
+ llvm::sys::path::make_absolute(Cmd.Directory, Driver);
}
this->Driver = Driver.str().str();
for (size_t I = 0, E = Cmd.CommandLine.size(); I < E; ++I) {
diff --git a/clang-tools-extra/clangd/index/SymbolCollector.cpp b/clang-tools-extra/clangd/index/SymbolCollector.cpp
index 6bdb108..39c479b 100644
--- a/clang-tools-extra/clangd/index/SymbolCollector.cpp
+++ b/clang-tools-extra/clangd/index/SymbolCollector.cpp
@@ -325,7 +325,7 @@ private:
if (R.second) {
llvm::SmallString<256> AbsPath = Path;
if (!llvm::sys::path::is_absolute(AbsPath) && !FallbackDir.empty())
- llvm::sys::fs::make_absolute(FallbackDir, AbsPath);
+ llvm::sys::path::make_absolute(FallbackDir, AbsPath);
assert(llvm::sys::path::is_absolute(AbsPath) &&
"If the VFS can't make paths absolute, a FallbackDir must be "
"provided");
diff --git a/clang-tools-extra/clangd/tool/ClangdMain.cpp b/clang-tools-extra/clangd/tool/ClangdMain.cpp
index 4de2f21..4a990f8 100644
--- a/clang-tools-extra/clangd/tool/ClangdMain.cpp
+++ b/clang-tools-extra/clangd/tool/ClangdMain.cpp
@@ -578,7 +578,7 @@ public:
Body = Body.ltrim('/');
llvm::SmallString<16> Path(Body);
path::native(Path);
- fs::make_absolute(TestScheme::TestDir, Path);
+ path::make_absolute(TestScheme::TestDir, Path);
return std::string(Path);
}
diff --git a/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp b/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp
index 372ab5f..fefbfc3 100644
--- a/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp
+++ b/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp
@@ -344,7 +344,7 @@ mapInputsToAbsPaths(clang::tooling::CompilationDatabase &CDB,
}
for (const auto &Cmd : Cmds) {
llvm::SmallString<256> CDBPath(Cmd.Filename);
- llvm::sys::fs::make_absolute(Cmd.Directory, CDBPath);
+ llvm::sys::path::make_absolute(Cmd.Directory, CDBPath);
CDBToAbsPaths[std::string(CDBPath)] = std::string(AbsPath);
}
}
diff --git a/clang-tools-extra/include-cleaner/unittests/RecordTest.cpp b/clang-tools-extra/include-cleaner/unittests/RecordTest.cpp
index 3fb49796..cbf7bae 100644
--- a/clang-tools-extra/include-cleaner/unittests/RecordTest.cpp
+++ b/clang-tools-extra/include-cleaner/unittests/RecordTest.cpp
@@ -649,11 +649,12 @@ TEST_F(PragmaIncludeTest, ExportInUnnamedBuffer) {
Clang->createVirtualFileSystem(VFS);
Clang->createDiagnostics();
- auto *FM = Clang->createFileManager();
+ Clang->createFileManager();
+ FileManager &FM = Clang->getFileManager();
ASSERT_TRUE(Clang->ExecuteAction(*Inputs.MakeAction()));
EXPECT_THAT(
- PI.getExporters(llvm::cantFail(FM->getFileRef("foo.h")), *FM),
- testing::ElementsAre(llvm::cantFail(FM->getFileRef("exporter.h"))));
+ PI.getExporters(llvm::cantFail(FM.getFileRef("foo.h")), FM),
+ testing::ElementsAre(llvm::cantFail(FM.getFileRef("exporter.h"))));
}
TEST_F(PragmaIncludeTest, OutlivesFMAndSM) {
diff --git a/clang/.clang-format b/clang/.clang-format
index 9b3aa8b..ecb44bf 100644
--- a/clang/.clang-format
+++ b/clang/.clang-format
@@ -1 +1,2 @@
BasedOnStyle: LLVM
+LineEnding: LF
diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt
index 4eaa7128..e4cb1a3 100644
--- a/clang/CMakeLists.txt
+++ b/clang/CMakeLists.txt
@@ -754,11 +754,22 @@ if (CLANG_ENABLE_BOOTSTRAP)
if(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED)
add_dependencies(clang-bootstrap-deps llvm-profdata)
set(PGO_OPT -DLLVM_PROFDATA=${LLVM_RUNTIME_OUTPUT_INTDIR}/llvm-profdata)
+ string(TOUPPER "${BOOTSTRAP_LLVM_BUILD_INSTRUMENTED}" BOOTSTRAP_LLVM_BUILD_INSTRUMENTED)
+ if (BOOTSTRAP_LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO")
+ add_dependencies(clang-bootstrap-deps llvm-profgen)
+ list(APPEND PGO_OPT -DLLVM_PROFGEN=${LLVM_RUNTIME_OUTPUT_INTDIR}/llvm-profgen)
+ endif()
endif()
if(LLVM_BUILD_INSTRUMENTED)
- add_dependencies(clang-bootstrap-deps generate-profdata)
- set(PGO_OPT -DLLVM_PROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.profdata)
+ string(TOUPPER "${LLVM_BUILD_INSTRUMENTED}" LLVM_BUILD_INSTRUMENTED)
+ if (LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO")
+ add_dependencies(clang-bootstrap-deps generate-sprofdata)
+ set(PGO_OPT -DLLVM_SPROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.sprofdata)
+ else()
+ add_dependencies(clang-bootstrap-deps generate-profdata)
+ set(PGO_OPT -DLLVM_PROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.profdata)
+ endif()
# Use the current tools for LTO instead of the instrumented ones
list(APPEND _BOOTSTRAP_DEFAULT_PASSTHROUGH
CMAKE_CXX_COMPILER
diff --git a/clang/cmake/caches/BOLT-CSSPGO.cmake b/clang/cmake/caches/BOLT-CSSPGO.cmake
new file mode 100644
index 0000000..b1c204a
--- /dev/null
+++ b/clang/cmake/caches/BOLT-CSSPGO.cmake
@@ -0,0 +1,3 @@
+set(BOLT_PGO_CMAKE_CACHE "CSSPGO" CACHE STRING "")
+set(BOOTSTRAP_CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "")
+include(${CMAKE_CURRENT_LIST_DIR}/BOLT-PGO.cmake)
diff --git a/clang/cmake/caches/BOLT-PGO.cmake b/clang/cmake/caches/BOLT-PGO.cmake
index 1a04ca9..cc9410f 100644
--- a/clang/cmake/caches/BOLT-PGO.cmake
+++ b/clang/cmake/caches/BOLT-PGO.cmake
@@ -1,3 +1,4 @@
+set(BOLT_PGO_CMAKE_CACHE "PGO" CACHE STRING "")
set(LLVM_ENABLE_PROJECTS "bolt;clang;lld" CACHE STRING "")
set(CLANG_BOOTSTRAP_TARGETS
@@ -14,4 +15,4 @@ set(BOOTSTRAP_CLANG_BOOTSTRAP_TARGETS
set(PGO_BUILD_CONFIGURATION
${CMAKE_CURRENT_LIST_DIR}/BOLT.cmake
CACHE STRING "")
-include(${CMAKE_CURRENT_LIST_DIR}/PGO.cmake)
+include(${CMAKE_CURRENT_LIST_DIR}/${BOLT_PGO_CMAKE_CACHE}.cmake)
diff --git a/clang/cmake/caches/CSSPGO.cmake b/clang/cmake/caches/CSSPGO.cmake
new file mode 100644
index 0000000..59e08a6
--- /dev/null
+++ b/clang/cmake/caches/CSSPGO.cmake
@@ -0,0 +1,2 @@
+set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED "CSSPGO" CACHE STRING "")
+include(${CMAKE_CURRENT_LIST_DIR}/PGO.cmake)
diff --git a/clang/docs/InternalsManual.rst b/clang/docs/InternalsManual.rst
index bd74227..c677ddfa 100644
--- a/clang/docs/InternalsManual.rst
+++ b/clang/docs/InternalsManual.rst
@@ -2859,6 +2859,67 @@ This library is called by the :ref:`Parser library <Parser>` during parsing to
do semantic analysis of the input. For valid programs, Sema builds an AST for
parsed constructs.
+
+Concept Satisfaction Checking and Subsumption
+---------------------------------------------
+
+As per the C++ standard, constraints are `normalized <https://eel.is/c++draft/temp.constr.normal>`_
+and the normal form is used both for subsumption and for constraint checking.
+Both depend on a parameter mapping that is substituted lazily; in particular,
+we should not substitute into unused arguments.
+
+Clang follows the order of operations prescribed by the standard.
+
+Normalization happens prior to satisfaction and subsumption
+and is handled by ``NormalizedConstraint``.
+
+Clang preserves intermediate concept-ids (``ConceptIdConstraint``) in the
+normalized form. They are used for diagnostics only; no substitution happens
+in a ``ConceptIdConstraint`` if its expression is satisfied.
+
+The normal form of the associated constraints of a declaration is cached in
+``Sema::NormalizationCache`` such that it is only computed once.
+
+A ``NormalizedConstraint`` is a recursive data structure, where each node
+contains a parameter mapping, represented by the indexes of all parameters
+being used.
+
+Checking satisfaction is done by ``ConstraintSatisfactionChecker``, recursively
+walking ``NormalizedConstraint``. At each level, we substitute the outermost
+level of the template arguments referenced in the parameter mapping of a
+normalized expression (``MultiLevelTemplateArgumentList``).
+
+For the following example,
+
+.. code-block:: c++
+
+ template <typename T>
+ concept A = __is_same(T, int);
+
+ template <typename U>
+ concept B = A<U> && __is_same(U, int);
+
+The normal form of B is
+
+.. code-block:: c++
+
+ __is_same(T, int) /*T->U, innermost level*/
+   && __is_same(U, int) {U->U} /*U->U, outermost level*/
+
+After substituting into the mapping, we substitute into the constraint
+expression using that copy of the ``MultiLevelTemplateArgumentList``, and then
+evaluate it.
+
+Because this is expensive, the result is cached in
+``UnsubstitutedConstraintSatisfactionCache``.
+
+Any error during satisfaction is recorded in ``ConstraintSatisfaction``.
+For nested requirements, ``ConstraintSatisfaction`` is stored (including
+diagnostics) in the AST, which is something we might want to improve.
+
+When an atomic constraint is not satisfied, we try to substitute into any
+enclosing concept-id using the same mechanism described above, for diagnostic
+purposes, and inject the result into the ``ConstraintSatisfaction``.
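+
+As an illustration (a sketch, not tied to any particular test), given
+
+.. code-block:: c++
+
+    template <typename T>
+    concept C = __is_same(T, int);
+
+    static_assert(C<void>);
+
+the preserved concept-id for ``C<void>`` lets the diagnostic name the failing
+concept-id before explaining which atomic constraint was not satisfied.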
+
.. _CodeGen:
The CodeGen Library
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 97799ae..d2e5bd2 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -160,6 +160,10 @@ C++23 Feature Support
C++20 Feature Support
^^^^^^^^^^^^^^^^^^^^^
+- Clang now normalizes constraints before checking whether they are satisfied, as mandated by the standard.
+  As a result, Clang no longer incorrectly diagnoses substitution failures in template arguments that
+  are only used in concept-ids, and produces better diagnostics for satisfaction failures. (#GH61811) (#GH135190)
+
C++17 Feature Support
^^^^^^^^^^^^^^^^^^^^^
@@ -361,7 +365,7 @@ Bug Fixes in This Version
first parameter. (#GH113323).
- Fixed a crash with incompatible pointer to integer conversions in designated
initializers involving string literals. (#GH154046)
-- Fix crash on CTAD for alias template. (#GH131342)
+- Fix crash on CTAD for alias template. (#GH131342) (#GH131408)
- Clang now emits a frontend error when a function marked with the `flatten` attribute
calls another function that requires target features not enabled in the caller. This
prevents a fatal error in the backend.
@@ -435,6 +439,7 @@ Bug Fixes to C++ Support
- Fix the result of `__builtin_is_implicit_lifetime` for types with a user-provided constructor. (#GH160610)
- Correctly deduce return types in ``decltype`` expressions. (#GH160497) (#GH56652) (#GH116319) (#GH161196)
- Fixed a crash in the pre-C++23 warning for attributes before a lambda declarator (#GH161070).
+- Fix a crash when attempting to deduce a deduction guide from a non-deducible template template parameter. (#GH130604)
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -443,6 +448,7 @@ Bug Fixes to AST Handling
legal representation. This is fixed because ElaboratedTypes don't exist anymore. (#GH43179) (#GH68670) (#GH92757)
- Fix unrecognized html tag causing undesirable comment lexing (#GH152944)
- Fix comment lexing of special command names (#GH152943)
+- Use ``extern`` as a hint to continue parsing when recovering from a malformed declaration.
Miscellaneous Bug Fixes
^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/clang/include/clang/AST/ASTConcept.h b/clang/include/clang/AST/ASTConcept.h
index 72da005..f362f24 100644
--- a/clang/include/clang/AST/ASTConcept.h
+++ b/clang/include/clang/AST/ASTConcept.h
@@ -28,10 +28,20 @@ namespace clang {
class ConceptDecl;
class TemplateDecl;
+class ConceptReference;
class Expr;
class NamedDecl;
struct PrintingPolicy;
+/// A record of an unsatisfied constraint: the constraint expression or
+/// concept-id, if the template arguments could be substituted into it, or a
+/// diagnostic if substitution resulted in an invalid expression.
+///
+using ConstraintSubstitutionDiagnostic = std::pair<SourceLocation, StringRef>;
+using UnsatisfiedConstraintRecord =
+ llvm::PointerUnion<const Expr *, const ConceptReference *,
+ const ConstraintSubstitutionDiagnostic *>;
+
/// The result of a constraint satisfaction check, containing the necessary
/// information to diagnose an unsatisfied constraint.
class ConstraintSatisfaction : public llvm::FoldingSetNode {
@@ -48,16 +58,13 @@ public:
ArrayRef<TemplateArgument> TemplateArgs)
: ConstraintOwner(ConstraintOwner), TemplateArgs(TemplateArgs) {}
- using SubstitutionDiagnostic = std::pair<SourceLocation, StringRef>;
- using Detail = llvm::PointerUnion<Expr *, SubstitutionDiagnostic *>;
-
bool IsSatisfied = false;
bool ContainsErrors = false;
/// \brief The substituted constraint expr, if the template arguments could be
/// substituted into them, or a diagnostic if substitution resulted in an
/// invalid expression.
- llvm::SmallVector<Detail, 4> Details;
+ llvm::SmallVector<UnsatisfiedConstraintRecord, 4> Details;
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C) {
Profile(ID, C, ConstraintOwner, TemplateArgs);
@@ -69,19 +76,12 @@ public:
bool HasSubstitutionFailure() {
for (const auto &Detail : Details)
- if (Detail.dyn_cast<SubstitutionDiagnostic *>())
+ if (Detail.dyn_cast<const ConstraintSubstitutionDiagnostic *>())
return true;
return false;
}
};
-/// Pairs of unsatisfied atomic constraint expressions along with the
-/// substituted constraint expr, if the template arguments could be
-/// substituted into them, or a diagnostic if substitution resulted in
-/// an invalid expression.
-using UnsatisfiedConstraintRecord =
- llvm::PointerUnion<Expr *, std::pair<SourceLocation, StringRef> *>;
-
/// \brief The result of a constraint satisfaction check, containing the
/// necessary information to diagnose an unsatisfied constraint.
///
@@ -101,6 +101,10 @@ struct ASTConstraintSatisfaction final :
return getTrailingObjects() + NumRecords;
}
+ ArrayRef<UnsatisfiedConstraintRecord> records() const {
+ return {begin(), end()};
+ }
+
ASTConstraintSatisfaction(const ASTContext &C,
const ConstraintSatisfaction &Satisfaction);
ASTConstraintSatisfaction(const ASTContext &C,
@@ -282,6 +286,11 @@ public:
}
};
+/// Insertion operator for diagnostics. This allows sending a ConceptReference
+/// into a diagnostic with <<.
+const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
+ const ConceptReference *C);
+
} // clang
#endif // LLVM_CLANG_AST_ASTCONCEPT_H
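
A hypothetical use of the new insertion operator; the diagnostic ID below is a placeholder, not one defined by this patch.

.. code-block:: c++

    void noteConceptId(clang::Sema &S, clang::SourceLocation Loc,
                       const clang::ConceptReference *CR) {
      // note_example_concept_here is an illustrative diagnostic ID.
      S.Diag(Loc, clang::diag::note_example_concept_here) << CR;
    }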
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index 12351e9..78220d4 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -3877,7 +3877,6 @@ typename clang::LazyGenerationalUpdatePtr<Owner, T, Update>::ValueType
return new (Ctx) LazyData(Source, Value);
return Value;
}
-
template <> struct llvm::DenseMapInfo<llvm::FoldingSetNodeID> {
static FoldingSetNodeID getEmptyKey() { return FoldingSetNodeID{}; }
diff --git a/clang/include/clang/AST/CharUnits.h b/clang/include/clang/AST/CharUnits.h
index c06354451..e570bfa 100644
--- a/clang/include/clang/AST/CharUnits.h
+++ b/clang/include/clang/AST/CharUnits.h
@@ -141,7 +141,7 @@ namespace clang {
/// Among other things, this promises that
/// self.alignTo(N) will just return self.
bool isMultipleOf(CharUnits N) const {
- return (*this % N) == 0;
+ return (*this % N) == CharUnits::Zero();
}
// Arithmetic operators.
@@ -165,8 +165,8 @@ namespace clang {
CharUnits operator% (QuantityType N) const {
return CharUnits(Quantity % N);
}
- QuantityType operator% (const CharUnits &Other) const {
- return Quantity % Other.Quantity;
+ CharUnits operator%(const CharUnits &Other) const {
+ return CharUnits(Quantity % Other.Quantity);
}
CharUnits operator+ (const CharUnits &Other) const {
return CharUnits(Quantity + Other.Quantity);
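
A small sketch of the revised semantics: the remainder is now typed, so ``isMultipleOf`` compares against ``CharUnits::Zero()`` instead of a raw integer.

.. code-block:: c++

    #include "clang/AST/CharUnits.h"

    bool isAligned() {
      using clang::CharUnits;
      CharUnits Size = CharUnits::fromQuantity(12);
      CharUnits Align = CharUnits::fromQuantity(4);
      // operator% now returns CharUnits, keeping the computation in the
      // strongly typed domain.
      return (Size % Align) == CharUnits::Zero(); // true
    }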
diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index d85d04d..406d79e 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -80,6 +80,7 @@ class TypeAliasTemplateDecl;
class UnresolvedSetImpl;
class VarTemplateDecl;
enum class ImplicitParamKind;
+struct UsualDeleteParams;
// Holds a constraint expression along with a pack expansion index, if
// expanded.
@@ -2646,6 +2647,8 @@ public:
bool isTypeAwareOperatorNewOrDelete() const;
void setIsTypeAwareOperatorNewOrDelete(bool IsTypeAwareOperator = true);
+ UsualDeleteParams getUsualDeleteParams() const;
+
/// Compute the language linkage.
LanguageLinkage getLanguageLinkage() const;
diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h
index 5f16bac..d78c7b6 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -2342,6 +2342,14 @@ struct ImplicitDeallocationParameters {
SizedDeallocationMode PassSize;
};
+/// The parameters to pass to a usual operator delete.
+struct UsualDeleteParams {
+ TypeAwareAllocationMode TypeAwareDelete = TypeAwareAllocationMode::No;
+ bool DestroyingDelete = false;
+ bool Size = false;
+ AlignedAllocationMode Alignment = AlignedAllocationMode::No;
+};
+
/// Represents a new-expression for memory allocation and constructor
/// calls, e.g: "new CXXNewExpr(foo)".
class CXXNewExpr final
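
A hypothetical consumer of the new struct, using the ``getUsualDeleteParams`` accessor declared in Decl.h above; the helper itself is illustrative.

.. code-block:: c++

    #include "clang/AST/Decl.h"
    #include "clang/AST/ExprCXX.h"
    #include "llvm/Support/raw_ostream.h"

    void describeUsualDelete(const clang::FunctionDecl *OperatorDelete) {
      clang::UsualDeleteParams P = OperatorDelete->getUsualDeleteParams();
      if (P.DestroyingDelete)
        llvm::errs() << "destroying delete\n";
      if (P.Size)
        llvm::errs() << "passes the allocation size\n";
    }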
diff --git a/clang/include/clang/AST/HLSLResource.h b/clang/include/clang/AST/HLSLResource.h
index e3ee0b1..9cdd81b 100644
--- a/clang/include/clang/AST/HLSLResource.h
+++ b/clang/include/clang/AST/HLSLResource.h
@@ -15,9 +15,12 @@
#define LLVM_CLANG_AST_HLSLRESOURCE_H
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Attrs.inc"
#include "clang/AST/DeclBase.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
namespace clang {
diff --git a/clang/include/clang/AST/OpenACCClause.h b/clang/include/clang/AST/OpenACCClause.h
index 5f06117..58ba8d91 100644
--- a/clang/include/clang/AST/OpenACCClause.h
+++ b/clang/include/clang/AST/OpenACCClause.h
@@ -840,14 +840,13 @@ public:
// alloca at the level of the base, and the init at the element level.
struct OpenACCPrivateRecipe {
VarDecl *AllocaDecl;
- Expr *InitExpr;
- OpenACCPrivateRecipe(VarDecl *A, Expr *I) : AllocaDecl(A), InitExpr(I) {}
+ OpenACCPrivateRecipe(VarDecl *A) : AllocaDecl(A) {}
bool isSet() const { return AllocaDecl; }
static OpenACCPrivateRecipe Empty() {
- return OpenACCPrivateRecipe(nullptr, nullptr);
+ return OpenACCPrivateRecipe(/*AllocaDecl=*/nullptr);
}
};
@@ -899,18 +898,17 @@ public:
// InitFromTemporary is the 'temp' declaration we put in to be 'copied from'.
struct OpenACCFirstPrivateRecipe {
VarDecl *AllocaDecl;
- Expr *InitExpr;
VarDecl *InitFromTemporary;
- OpenACCFirstPrivateRecipe(VarDecl *A, Expr *I, VarDecl *T)
- : AllocaDecl(A), InitExpr(I), InitFromTemporary(T) {
- assert(!AllocaDecl || AllocaDecl->getInit() == nullptr);
+ OpenACCFirstPrivateRecipe(VarDecl *A, VarDecl *T)
+ : AllocaDecl(A), InitFromTemporary(T) {
assert(!InitFromTemporary || InitFromTemporary->getInit() == nullptr);
}
bool isSet() const { return AllocaDecl; }
static OpenACCFirstPrivateRecipe Empty() {
- return OpenACCFirstPrivateRecipe(nullptr, nullptr, nullptr);
+ return OpenACCFirstPrivateRecipe(/*AllocaDecl=*/nullptr,
+ /*InitFromTemporary=*/nullptr);
}
};
@@ -1282,16 +1280,13 @@ public:
// 'main' declaration used for initialization, which is fixed.
struct OpenACCReductionRecipe {
VarDecl *AllocaDecl;
- Expr *InitExpr;
// TODO: OpenACC: this should eventually have the operations here too.
- OpenACCReductionRecipe(VarDecl *A, Expr *I) : AllocaDecl(A), InitExpr(I) {
- assert(!AllocaDecl || AllocaDecl->getInit() == nullptr);
- }
+ OpenACCReductionRecipe(VarDecl *A) : AllocaDecl(A) {}
bool isSet() const { return AllocaDecl; }
static OpenACCReductionRecipe Empty() {
- return OpenACCReductionRecipe(nullptr, nullptr);
+ return OpenACCReductionRecipe(/*AllocaDecl=*/nullptr);
}
};
diff --git a/clang/include/clang/AST/TypeBase.h b/clang/include/clang/AST/TypeBase.h
index e0d00b8..6786b2f 100644
--- a/clang/include/clang/AST/TypeBase.h
+++ b/clang/include/clang/AST/TypeBase.h
@@ -6702,15 +6702,21 @@ public:
LLVM_PREFERRED_TYPE(bool)
uint8_t RawBuffer : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ uint8_t IsCounter : 1;
+
Attributes(llvm::dxil::ResourceClass ResourceClass, bool IsROV = false,
- bool RawBuffer = false)
- : ResourceClass(ResourceClass), IsROV(IsROV), RawBuffer(RawBuffer) {}
+ bool RawBuffer = false, bool IsCounter = false)
+ : ResourceClass(ResourceClass), IsROV(IsROV), RawBuffer(RawBuffer),
+ IsCounter(IsCounter) {}
- Attributes() : Attributes(llvm::dxil::ResourceClass::UAV, false, false) {}
+ Attributes()
+ : Attributes(llvm::dxil::ResourceClass::UAV, false, false, false) {}
friend bool operator==(const Attributes &LHS, const Attributes &RHS) {
- return std::tie(LHS.ResourceClass, LHS.IsROV, LHS.RawBuffer) ==
- std::tie(RHS.ResourceClass, RHS.IsROV, RHS.RawBuffer);
+ return std::tie(LHS.ResourceClass, LHS.IsROV, LHS.RawBuffer,
+ LHS.IsCounter) == std::tie(RHS.ResourceClass, RHS.IsROV,
+ RHS.RawBuffer, RHS.IsCounter);
}
friend bool operator!=(const Attributes &LHS, const Attributes &RHS) {
return !(LHS == RHS);
@@ -6751,6 +6757,7 @@ public:
ID.AddInteger(static_cast<uint32_t>(Attrs.ResourceClass));
ID.AddBoolean(Attrs.IsROV);
ID.AddBoolean(Attrs.RawBuffer);
+ ID.AddBoolean(Attrs.IsCounter);
}
static bool classof(const Type *T) {
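
A sketch of constructing the attributes with the new bit, mirroring the extended constructor above (the include path assumes the TypeBase.h split used by this series).

.. code-block:: c++

    #include "clang/AST/TypeBase.h"

    clang::HLSLAttributedResourceType::Attributes makeCounterUAV() {
      // IsCounter is the new fourth flag introduced by this patch.
      return clang::HLSLAttributedResourceType::Attributes(
          llvm::dxil::ResourceClass::UAV,
          /*IsROV=*/false, /*RawBuffer=*/true, /*IsCounter=*/true);
    }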
diff --git a/clang/include/clang/AST/TypeProperties.td b/clang/include/clang/AST/TypeProperties.td
index b3932a6..9dc85fb 100644
--- a/clang/include/clang/AST/TypeProperties.td
+++ b/clang/include/clang/AST/TypeProperties.td
@@ -662,6 +662,9 @@ let Class = HLSLAttributedResourceType in {
def : Property<"rawBuffer", Bool> {
let Read = [{ node->getAttrs().RawBuffer }];
}
+ def : Property<"isCounter", Bool> {
+ let Read = [{ node->getAttrs().IsCounter }];
+ }
def : Property<"wrappedTy", QualType> {
let Read = [{ node->getWrappedType() }];
}
@@ -669,7 +672,7 @@ let Class = HLSLAttributedResourceType in {
let Read = [{ node->getContainedType() }];
}
def : Creator<[{
- HLSLAttributedResourceType::Attributes attrs(static_cast<llvm::dxil::ResourceClass>(resClass), isROV, rawBuffer);
+ HLSLAttributedResourceType::Attributes attrs(static_cast<llvm::dxil::ResourceClass>(resClass), isROV, rawBuffer, isCounter);
return ctx.getHLSLAttributedResourceType(wrappedTy, containedTy, attrs);
}]>;
}
diff --git a/clang/include/clang/Analysis/CFG.h b/clang/include/clang/Analysis/CFG.h
index 1b1ff5e..6dd7d13 100644
--- a/clang/include/clang/Analysis/CFG.h
+++ b/clang/include/clang/Analysis/CFG.h
@@ -1251,6 +1251,7 @@ public:
bool MarkElidedCXXConstructors = false;
bool AddVirtualBaseBranches = false;
bool OmitImplicitValueInitializers = false;
+ bool AssumeReachableDefaultInSwitchStatements = false;
BuildOptions() = default;
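
A sketch of the source shape the new option targets; with the flag set, the CFG is expected to keep the ``default`` branch reachable even when every enumerator is covered explicitly.

.. code-block:: c++

    enum Color { Red, Green };

    int classify(Color C) {
      switch (C) {
      case Red:
        return 0;
      case Green:
        return 1;
      default: // kept reachable when the option is enabled
        return -1;
      }
    }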
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index fe3ca70..3c697ed 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -5074,6 +5074,12 @@ def HLSLRawBuffer : TypeAttr {
let Documentation = [InternalOnly];
}
+def HLSLIsCounter : TypeAttr {
+ let Spellings = [CXX11<"hlsl", "is_counter">];
+ let LangOpts = [HLSL];
+ let Documentation = [InternalOnly];
+}
+
def HLSLGroupSharedAddressSpace : TypeAttr {
let Spellings = [CustomKeyword<"groupshared">];
let Subjects = SubjectList<[Var]>;
diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h
index a8943df..41595ec 100644
--- a/clang/include/clang/Basic/LangOptions.h
+++ b/clang/include/clang/Basic/LangOptions.h
@@ -549,8 +549,7 @@ public:
bool CheckNew = false;
/// The HLSL root signature version for dxil.
- llvm::dxbc::RootSignatureVersion HLSLRootSigVer =
- llvm::dxbc::RootSignatureVersion::V1_1;
+ llvm::dxbc::RootSignatureVersion HLSLRootSigVer;
/// The HLSL root signature that will be used to override the root signature
/// used for the shader entry point.
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 3f83c30..8a5bf03 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -148,9 +148,10 @@ public:
}
mlir::Value createComplexReal(mlir::Location loc, mlir::Value operand) {
- auto operandTy = mlir::cast<cir::ComplexType>(operand.getType());
- return cir::ComplexRealOp::create(*this, loc, operandTy.getElementType(),
- operand);
+ auto resultType = operand.getType();
+ if (auto complexResultType = mlir::dyn_cast<cir::ComplexType>(resultType))
+ resultType = complexResultType.getElementType();
+ return cir::ComplexRealOp::create(*this, loc, resultType, operand);
}
mlir::Value createComplexImag(mlir::Location loc, mlir::Value operand) {
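
A sketch of the relaxed builder contract; the builder, location, and operand are assumed to be in scope at a real call site.

.. code-block:: c++

    mlir::Value realPartOf(cir::CIRBaseBuilderTy &builder, mlir::Location loc,
                           mlir::Value v) {
      // A complex operand yields its element type; a scalar passes through.
      return builder.createComplexReal(loc, v);
    }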
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index e1be08c..0a78492 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -209,9 +209,10 @@ def CIR_CastOp : CIR_Op<"cast", [
Example:
```mlir
- %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool
+ %4 = cir.cast int_to_bool %3 : i32 -> !cir.bool
...
- %x = cir.cast(array_to_ptrdecay, %0 : !cir.ptr<!cir.array<i32 x 10>>), !cir.ptr<i32>
+ %x = cir.cast array_to_ptrdecay %0
+ : !cir.ptr<!cir.array<i32 x 10>> -> !cir.ptr<i32>
```
}];
@@ -219,8 +220,7 @@ def CIR_CastOp : CIR_Op<"cast", [
let results = (outs CIR_AnyType:$result);
let assemblyFormat = [{
- `(` $kind `,` $src `:` type($src) `)`
- `,` type($result) attr-dict
+ $kind $src `:` type($src) `->` type($result) attr-dict
}];
// The input and output types should match the cast kind.
@@ -1176,7 +1176,7 @@ def CIR_GotoOp : CIR_Op<"goto", [Terminator]> {
```mlir
cir.scope { // REGION #1
%2 = cir.load %0 : !cir.ptr<!s32i>, !s32i
- %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool
+ %3 = cir.cast int_to_bool %2 : !s32i -> !cir.bool
cir.if %3 {
cir.goto "label"
}
@@ -3260,18 +3260,20 @@ def CIR_ComplexCreateOp : CIR_Op<"complex.create", [Pure, SameTypeOperands]> {
def CIR_ComplexRealOp : CIR_Op<"complex.real", [Pure]> {
let summary = "Extract the real part of a complex value";
let description = [{
- `cir.complex.real` operation takes an operand of `!cir.complex` type and
- yields the real part of it.
+    The `cir.complex.real` operation takes an operand of `!cir.complex`,
+    `!cir.int` or `!cir.float` type. If the operand is `!cir.complex`, its
+    real part is returned; otherwise, the value is returned unmodified.
Example:
```mlir
- %1 = cir.complex.real %0 : !cir.complex<!cir.float> -> !cir.float
+ %real = cir.complex.real %complex : !cir.complex<!cir.float> -> !cir.float
+      %real2 = cir.complex.real %scalar : !cir.float -> !cir.float
```
}];
let results = (outs CIR_AnyIntOrFloatType:$result);
- let arguments = (ins CIR_ComplexType:$operand);
+ let arguments = (ins CIR_AnyComplexOrIntOrFloatType:$operand);
let assemblyFormat = [{
$operand `:` qualified(type($operand)) `->` qualified(type($result))
@@ -3994,9 +3996,9 @@ def CIR_VAStartOp : CIR_Op<"va_start"> {
```mlir
// %args : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
- %p = cir.cast(array_to_ptrdecay, %args
- : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>),
- !cir.ptr<!rec___va_list_tag>
+ %p = cir.cast array_to_ptrdecay %args
+        : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
+ -> !cir.ptr<!rec___va_list_tag>
%count = cir.load %0 : !cir.ptr<!s32i>, !s32i
cir.va_start %p %count : !cir.ptr<!rec___va_list_tag>, !s32i
```
@@ -4033,9 +4035,9 @@ def CIR_VAEndOp : CIR_Op<"va_end"> {
Example:
```mlir
// %args : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
- %p = cir.cast(array_to_ptrdecay, %args
- : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>),
- !cir.ptr<!rec___va_list_tag>
+ %p = cir.cast array_to_ptrdecay %args
+ : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
+ -> !cir.ptr<!rec___va_list_tag>
cir.va_end %p : !cir.ptr<!rec___va_list_tag>
```
}];
@@ -4068,9 +4070,9 @@ def CIR_VAArgOp : CIR_Op<"va_arg"> {
Example:
```mlir
// %args : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
- %p = cir.cast(array_to_ptrdecay, %args
- : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>),
- !cir.ptr<!rec___va_list_tag>
+ %p = cir.cast array_to_ptrdecay %args
+ : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>
+ -> !cir.ptr<!rec___va_list_tag>
cir.va.start %p : !cir.ptr<!rec___va_list_tag>
// Fetch an `int` from the vararg list.
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td b/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td
index 82f6e1d..da03a29 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td
@@ -165,6 +165,12 @@ def CIR_AnyIntOrFloatType : AnyTypeOf<[CIR_AnyFloatType, CIR_AnyIntType],
def CIR_AnyComplexType : CIR_TypeBase<"::cir::ComplexType", "complex type">;
+def CIR_AnyComplexOrIntOrFloatType : AnyTypeOf<[
+ CIR_AnyComplexType, CIR_AnyFloatType, CIR_AnyIntType
+], "complex, integer or floating point type"> {
+ let cppFunctionName = "isComplexOrIntegerOrFloatingPointType";
+}
+
//===----------------------------------------------------------------------===//
// Array Type predicates
//===----------------------------------------------------------------------===//
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 7a6c084..3dfcafc 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -133,7 +133,6 @@ struct MissingFeatures {
// RecordType
static bool skippedLayout() { return false; }
static bool astRecordDeclAttr() { return false; }
- static bool recordZeroInit() { return false; }
static bool recordZeroInitPadding() { return false; }
static bool zeroSizeRecordMembers() { return false; }
@@ -192,6 +191,7 @@ struct MissingFeatures {
static bool builtinCheckKind() { return false; }
static bool cgCapturedStmtInfo() { return false; }
static bool cgFPOptionsRAII() { return false; }
+ static bool checkBitfieldClipping() { return false; }
static bool cirgenABIInfo() { return false; }
static bool cleanupAfterErrorDiags() { return false; }
static bool cleanupsToDeactivate() { return false; }
diff --git a/clang/include/clang/Driver/CommonArgs.h b/clang/include/clang/Driver/CommonArgs.h
index 40ae406..23426c0 100644
--- a/clang/include/clang/Driver/CommonArgs.h
+++ b/clang/include/clang/Driver/CommonArgs.h
@@ -304,6 +304,11 @@ std::string complexRangeKindToStr(LangOptions::ComplexRangeKind Range);
// Render a frontend option corresponding to ComplexRangeKind.
std::string renderComplexRangeOption(LangOptions::ComplexRangeKind Range);
+// Set the complex range and output a warning as needed.
+void setComplexRange(const Driver &D, StringRef NewOpt,
+ LangOptions::ComplexRangeKind NewRange, StringRef &LastOpt,
+ LangOptions::ComplexRangeKind &Range);
+
} // end namespace tools
} // end namespace driver
} // end namespace clang
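
A hypothetical call site following the declared signature; the option string and the chosen enumerator are illustrative only.

.. code-block:: c++

    void applyOption(const clang::driver::Driver &D, llvm::StringRef &LastOpt,
                     clang::LangOptions::ComplexRangeKind &Range) {
      // Records the new range, warning if it conflicts with a prior option.
      clang::driver::tools::setComplexRange(
          D, /*NewOpt=*/"-fcomplex-arithmetic=basic",
          clang::LangOptions::ComplexRangeKind::CX_Basic, LastOpt, Range);
    }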
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 6245cf33..2ef6098 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -2750,6 +2750,9 @@ def fno_unsafe_math_optimizations : Flag<["-"], "fno-unsafe-math-optimizations">
Group<f_Group>;
def fassociative_math : Flag<["-"], "fassociative-math">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>;
def fno_associative_math : Flag<["-"], "fno-associative-math">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>;
+def fno_fast_real_mod : Flag<["-"], "fno-fast-real-mod">,
+ Group<f_Group>, Visibility<[FlangOption, FC1Option]>,
+ HelpText<"Disable optimization of MOD for REAL types in presence of -ffast-math">;
defm reciprocal_math : BoolFOption<"reciprocal-math",
LangOpts<"AllowRecip">, DefaultFalse,
PosFlag<SetTrue, [], [ClangOption, CC1Option, FC1Option, FlangOption],
@@ -9473,7 +9476,7 @@ def target_profile : DXCJoinedOrSeparate<"T">, MetaVarName<"<profile>">,
"lib_6_3, lib_6_4, lib_6_5, lib_6_6, lib_6_7, lib_6_x,"
"ms_6_5, ms_6_6, ms_6_7,"
"as_6_5, as_6_6, as_6_7,"
- "rootsig_1_0, rootsig_1_1">;
+ "rootsig_1_0, rootsig_1_1, rootsig_1_2">;
def emit_pristine_llvm : DXCFlag<"emit-pristine-llvm">,
HelpText<"Emit pristine LLVM IR from the frontend by not running any LLVM passes at all."
"Same as -S + -emit-llvm + -disable-llvm-passes.">;
@@ -9486,9 +9489,9 @@ def fdx_rootsignature_version :
Group<dxc_Group>,
Visibility<[ClangOption, CC1Option]>,
HelpText<"Root Signature Version">,
- Values<"rootsig_1_0,rootsig_1_1">,
+ Values<"rootsig_1_0,rootsig_1_1,rootsig_1_2">,
NormalizedValuesScope<"llvm::dxbc::RootSignatureVersion">,
- NormalizedValues<["V1_0", "V1_1"]>,
+ NormalizedValues<["V1_0", "V1_1", "V1_2"]>,
MarshallingInfoEnum<LangOpts<"HLSLRootSigVer">, "V1_1">;
def dxc_rootsig_ver :
Separate<["/", "-"], "force-rootsig-ver">,
diff --git a/clang/include/clang/Frontend/CompilerInstance.h b/clang/include/clang/Frontend/CompilerInstance.h
index a6b6993..44fff69 100644
--- a/clang/include/clang/Frontend/CompilerInstance.h
+++ b/clang/include/clang/Frontend/CompilerInstance.h
@@ -712,12 +712,10 @@ public:
const CodeGenOptions *CodeGenOpts = nullptr);
/// Create the file manager and replace any existing one with it.
- ///
- /// \return The new file manager on success, or null on failure.
- FileManager *createFileManager();
+ void createFileManager();
/// Create the source manager and replace any existing one with it.
- void createSourceManager(FileManager &FileMgr);
+ void createSourceManager();
/// Create the preprocessor, using the invocation, file, and source managers,
/// and replace any existing one with it.
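
A minimal sketch of the updated call sequence, mirroring the RecordTest.cpp change earlier in this diff.

.. code-block:: c++

    #include "clang/Frontend/CompilerInstance.h"

    void setUp(clang::CompilerInstance &Clang) {
      Clang.createFileManager();   // returns void; the instance owns it
      Clang.createSourceManager(); // uses the instance's FileManager
      clang::FileManager &FM = Clang.getFileManager();
      (void)FM;
    }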
diff --git a/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def b/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def
index a5cfeb3..1d7f7ad 100644
--- a/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def
+++ b/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def
@@ -65,6 +65,9 @@
#ifndef STATIC_BORDER_COLOR_ENUM
#define STATIC_BORDER_COLOR_ENUM(NAME, LIT) ENUM(NAME, LIT)
#endif
+#ifndef STATIC_SAMPLER_FLAG_ENUM
+#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) ENUM(NAME, LIT)
+#endif
// General Tokens:
TOK(invalid, "invalid identifier")
@@ -228,6 +231,10 @@ STATIC_BORDER_COLOR_ENUM(OpaqueWhite, "STATIC_BORDER_COLOR_OPAQUE_WHITE")
STATIC_BORDER_COLOR_ENUM(OpaqueBlackUint, "STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT")
STATIC_BORDER_COLOR_ENUM(OpaqueWhiteUint, "STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT")
+// Static Sampler Flag Enums:
+STATIC_SAMPLER_FLAG_ENUM(UintBorderColor, "UINT_BORDER_COLOR")
+STATIC_SAMPLER_FLAG_ENUM(NonNormalizedCoordinates, "NON_NORMALIZED_COORDINATES")
+
#undef STATIC_BORDER_COLOR_ENUM
#undef COMPARISON_FUNC_ENUM
#undef TEXTURE_ADDRESS_MODE_ENUM
@@ -237,6 +244,7 @@ STATIC_BORDER_COLOR_ENUM(OpaqueWhiteUint, "STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT
#undef DESCRIPTOR_RANGE_FLAG_ENUM_OFF
#undef DESCRIPTOR_RANGE_FLAG_ENUM_ON
#undef ROOT_DESCRIPTOR_FLAG_ENUM
+#undef STATIC_SAMPLER_FLAG_ENUM
#undef ROOT_FLAG_ENUM
#undef DESCRIPTOR_RANGE_OFFSET_ENUM
#undef UNBOUNDED_ENUM
diff --git a/clang/include/clang/Parse/ParseHLSLRootSignature.h b/clang/include/clang/Parse/ParseHLSLRootSignature.h
index b06846f..8f91d7c 100644
--- a/clang/include/clang/Parse/ParseHLSLRootSignature.h
+++ b/clang/include/clang/Parse/ParseHLSLRootSignature.h
@@ -130,6 +130,7 @@ private:
std::optional<float> MaxLOD;
std::optional<uint32_t> Space;
std::optional<llvm::dxbc::ShaderVisibility> Visibility;
+ std::optional<llvm::dxbc::StaticSamplerFlags> Flags;
};
std::optional<ParsedStaticSamplerParams> parseStaticSamplerParams();
@@ -153,6 +154,8 @@ private:
parseRootDescriptorFlags(RootSignatureToken::Kind Context);
std::optional<llvm::dxbc::DescriptorRangeFlags>
parseDescriptorRangeFlags(RootSignatureToken::Kind Context);
+ std::optional<llvm::dxbc::StaticSamplerFlags>
+ parseStaticSamplerFlags(RootSignatureToken::Kind Context);
/// Use NumericLiteralParser to convert CurToken.NumSpelling into an unsigned
/// 32-bit integer
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index f53aafd..265462a 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -65,6 +65,7 @@
#include "clang/Sema/Redeclaration.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaBase.h"
+#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/APInt.h"
@@ -11694,8 +11695,9 @@ public:
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
- NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
- const TemplateArgumentListInfo *TemplateArgs);
+ NamedDecl *FoundDecl, TemplateDecl *NamedConcept,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool DoCheckConstraintSatisfaction = true);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
void diagnoseMissingTemplateArguments(const CXXScopeSpec &SS,
@@ -12025,6 +12027,13 @@ public:
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
+ bool CheckTemplateArgumentList(
+ TemplateDecl *Template, TemplateParameterList *Params,
+ SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs,
+ const DefaultArguments &DefaultArgs, bool PartialTemplateArgs,
+ CheckTemplateArgumentInfo &CTAI, bool UpdateArgsWithConversions = true,
+ bool *ConstraintsNotSatisfied = nullptr);
+
bool CheckTemplateTypeArgument(
TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &SugaredConverted,
@@ -12783,6 +12792,18 @@ public:
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
+ /// Mark which template parameters are named in a given expression.
+ ///
+  /// Unlike MarkUsedTemplateParameters, this excludes parameters that
+  /// are used but not directly named by an expression, i.e. it excludes
+  /// any template parameter that denotes the type of a referenced NTTP.
+ ///
+ /// \param Used a bit vector whose elements will be set to \c true
+ /// to indicate when the corresponding template parameter will be
+ /// deduced.
+ void MarkUsedTemplateParametersForSubsumptionParameterMapping(
+ const Expr *E, unsigned Depth, llvm::SmallBitVector &Used);
+
/// Mark which template parameters can be deduced from a given
/// template argument list.
///
@@ -12799,6 +12820,9 @@ public:
void MarkUsedTemplateParameters(ArrayRef<TemplateArgument> TemplateArgs,
unsigned Depth, llvm::SmallBitVector &Used);
+ void MarkUsedTemplateParameters(ArrayRef<TemplateArgumentLoc> TemplateArgs,
+ unsigned Depth, llvm::SmallBitVector &Used);
+
void
MarkDeducedTemplateParameters(const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
@@ -13096,6 +13120,9 @@ public:
/// Whether we're substituting into constraints.
bool InConstraintSubstitution;
+ /// Whether we're substituting into the parameter mapping of a constraint.
+ bool InParameterMappingSubstitution;
+
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
@@ -13146,8 +13173,10 @@ public:
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false),
- InConstraintSubstitution(false), Entity(nullptr), Template(nullptr),
- TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
+ InConstraintSubstitution(false),
+ InParameterMappingSubstitution(false), Entity(nullptr),
+ Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
+ DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
@@ -13359,6 +13388,11 @@ public:
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
+ bool SubstTemplateArgumentsInParameterMapping(
+ ArrayRef<TemplateArgumentLoc> Args, SourceLocation BaseLoc,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateArgumentListInfo &Out, bool BuildPackExpansionTypes);
+
/// Retrieve the template argument list(s) that should be used to
/// instantiate the definition of the given declaration.
///
@@ -13820,6 +13854,12 @@ public:
CodeSynthesisContexts.back().InConstraintSubstitution;
}
+ bool inParameterMappingSubstitution() const {
+ return !CodeSynthesisContexts.empty() &&
+ CodeSynthesisContexts.back().InParameterMappingSubstitution &&
+ !inConstraintSubstitution();
+ }
+
using EntityPrinter = llvm::function_ref<void(llvm::raw_ostream &)>;
/// \brief create a Requirement::SubstitutionDiagnostic with only a
@@ -14704,6 +14744,10 @@ public:
SatisfactionStack.swap(NewSS);
}
+ using ConstrainedDeclOrNestedRequirement =
+ llvm::PointerUnion<const NamedDecl *,
+ const concepts::NestedRequirement *>;
+
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
@@ -14728,44 +14772,12 @@ public:
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
- const NamedDecl *Template,
+ ConstrainedDeclOrNestedRequirement Entity,
ArrayRef<AssociatedConstraint> AssociatedConstraints,
const MultiLevelTemplateArgumentList &TemplateArgLists,
- SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction) {
- llvm::SmallVector<Expr *, 4> Converted;
- return CheckConstraintSatisfaction(Template, AssociatedConstraints,
- Converted, TemplateArgLists,
- TemplateIDRange, Satisfaction);
- }
-
- /// \brief Check whether the given list of constraint expressions are
- /// satisfied (as if in a 'conjunction') given template arguments.
- /// Additionally, takes an empty list of Expressions which is populated with
- /// the instantiated versions of the ConstraintExprs.
- /// \param Template the template-like entity that triggered the constraints
- /// check (either a concept or a constrained entity).
- /// \param ConstraintExprs a list of constraint expressions, treated as if
- /// they were 'AND'ed together.
- /// \param ConvertedConstraints a out parameter that will get populated with
- /// the instantiated version of the ConstraintExprs if we successfully checked
- /// satisfaction.
- /// \param TemplateArgList the multi-level list of template arguments to
- /// substitute into the constraint expression. This should be relative to the
- /// top-level (hence multi-level), since we need to instantiate fully at the
- /// time of checking.
- /// \param TemplateIDRange The source range of the template id that
- /// caused the constraints check.
- /// \param Satisfaction if true is returned, will contain details of the
- /// satisfaction, with enough information to diagnose an unsatisfied
- /// expression.
- /// \returns true if an error occurred and satisfaction could not be checked,
- /// false otherwise.
- bool CheckConstraintSatisfaction(
- const NamedDecl *Template,
- ArrayRef<AssociatedConstraint> AssociatedConstraints,
- llvm::SmallVectorImpl<Expr *> &ConvertedConstraints,
- const MultiLevelTemplateArgumentList &TemplateArgList,
- SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
+ SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction,
+ const ConceptReference *TopLevelConceptId = nullptr,
+ Expr **ConvertedExpr = nullptr);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
@@ -14831,16 +14843,17 @@ public:
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
+ SourceLocation Loc = {},
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
- DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
+ DiagnoseUnsatisfiedConstraint(const ConceptSpecializationExpr *ConstraintExpr,
bool First = true);
const NormalizedConstraint *getNormalizedAssociatedConstraints(
- const NamedDecl *ConstrainedDecl,
+ ConstrainedDeclOrNestedRequirement Entity,
ArrayRef<AssociatedConstraint> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
@@ -14865,6 +14878,15 @@ public:
const NamedDecl *D1, ArrayRef<AssociatedConstraint> AC1,
const NamedDecl *D2, ArrayRef<AssociatedConstraint> AC2);
+ /// Cache the satisfaction of an atomic constraint.
+ /// The key is based on the unsubstituted expression and the parameter
+  /// mapping. This lets us avoid substituting the mapping more than once,
+ /// which is (very!) expensive.
+ /// FIXME: this should be private.
+ llvm::DenseMap<llvm::FoldingSetNodeID,
+ UnsubstitutedConstraintSatisfactionCacheResult>
+ UnsubstitutedConstraintSatisfactionCache;
+
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
@@ -14875,8 +14897,11 @@ private:
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
- llvm::DenseMap<const NamedDecl *, NormalizedConstraint *> NormalizationCache;
+ llvm::DenseMap<ConstrainedDeclOrNestedRequirement, NormalizedConstraint *>
+ NormalizationCache;
+ /// Cache whether the associated constraint of a declaration
+ /// is satisfied.
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
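
A sketch of how such a cache can be keyed, leaning on the ``DenseMapInfo<FoldingSetNodeID>`` specialization added in ASTContext.h earlier in this diff; ``Cache``, ``E``, ``Ctx``, and ``Result`` are assumed to be in scope.

.. code-block:: c++

    llvm::FoldingSetNodeID ID;
    E->Profile(ID, Ctx, /*Canonical=*/true);
    // try_emplace avoids recomputing (and re-substituting) on a cache hit.
    auto [It, Inserted] = Cache.try_emplace(std::move(ID), Result);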
diff --git a/clang/include/clang/Sema/SemaConcept.h b/clang/include/clang/Sema/SemaConcept.h
index 648a9c5..51ca1e1 100644
--- a/clang/include/clang/Sema/SemaConcept.h
+++ b/clang/include/clang/Sema/SemaConcept.h
@@ -16,130 +16,406 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Sema/Ownership.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include <optional>
#include <utility>
namespace clang {
class Sema;
+class MultiLevelTemplateArgumentList;
-enum { ConstraintAlignment = 8 };
+/// \brief A normalized constraint, as defined in C++ [temp.constr.normal], is
+/// either an atomic constraint, a conjunction of normalized constraints or a
+/// disjunction of normalized constraints.
+struct NormalizedConstraint {
+
+ enum class ConstraintKind : unsigned char {
+ Atomic = 0,
+ ConceptId,
+ FoldExpanded,
+ Compound,
+ };
+
+ enum CompoundConstraintKind : unsigned char {
+ CCK_Conjunction,
+ CCK_Disjunction
+ };
+ enum class FoldOperatorKind : unsigned char { And, Or };
+
+ using OccurenceList = llvm::SmallBitVector;
+
+protected:
+ using ExprOrConcept =
+ llvm::PointerUnion<const Expr *, const ConceptReference *>;
+
+ struct AtomicConstraintBits {
+ // Kind is the first member of all union members,
+ // as we rely on their initial common sequence.
+ LLVM_PREFERRED_TYPE(ConstraintKind)
+ unsigned Kind : 5;
+ unsigned Placeholder : 1;
+ unsigned PackSubstitutionIndex : 26;
+ // Indexes, IndexesForSubsumption, and Args are part of the common initial
+ // sequences of constraints that do have a mapping.
+
+ // Indexes of the parameters used in a constraint expression.
+ OccurenceList Indexes;
+ // Indexes of the parameters named directly in a constraint expression.
+ // FIXME: we should try to reduce the size of this struct?
+ OccurenceList IndexesForSubsumption;
+
+ TemplateArgumentLoc *Args;
+ TemplateParameterList *ParamList;
+ ExprOrConcept ConstraintExpr;
+ const NamedDecl *ConstraintDecl;
+ };
+
+ struct FoldExpandedConstraintBits {
+ LLVM_PREFERRED_TYPE(ConstraintKind)
+ unsigned Kind : 5;
+ LLVM_PREFERRED_TYPE(FoldOperatorKind)
+ unsigned FoldOperator : 1;
+ unsigned Placeholder : 26;
+ OccurenceList Indexes;
+ OccurenceList IndexesForSubsumption;
+ TemplateArgumentLoc *Args;
+ TemplateParameterList *ParamList;
+ const Expr *Pattern;
+ const NamedDecl *ConstraintDecl;
+ NormalizedConstraint *Constraint;
+ };
+
+ struct ConceptIdBits : AtomicConstraintBits {
+ NormalizedConstraint *Sub;
+
+ // Only used for parameter mapping.
+ const ConceptSpecializationExpr *CSE;
+ };
+
+ struct CompoundConstraintBits {
+ LLVM_PREFERRED_TYPE(ConstraintKind)
+ unsigned Kind : 5;
+ LLVM_PREFERRED_TYPE(CompoundConstraintKind)
+ unsigned CCK : 1;
+ NormalizedConstraint *LHS;
+ NormalizedConstraint *RHS;
+ };
+
+ union {
+ AtomicConstraintBits Atomic;
+ FoldExpandedConstraintBits FoldExpanded;
+ ConceptIdBits ConceptId;
+ CompoundConstraintBits Compound;
+ };
+
+ ~NormalizedConstraint() {
+ if (getKind() != ConstraintKind::Compound)
+ Atomic.Indexes.llvm::SmallBitVector::~SmallBitVector();
+ }
+
+ NormalizedConstraint(const Expr *ConstraintExpr,
+ const NamedDecl *ConstraintDecl,
+ UnsignedOrNone PackIndex)
+ : Atomic{llvm::to_underlying(ConstraintKind::Atomic),
+ /*Placeholder=*/0,
+ PackIndex.toInternalRepresentation(),
+ /*Indexes=*/{},
+ /*IndexesForSubsumption=*/{},
+ /*Args=*/nullptr,
+ /*ParamList=*/nullptr,
+ ConstraintExpr,
+ ConstraintDecl} {}
+
+ NormalizedConstraint(const Expr *Pattern, FoldOperatorKind OpKind,
+ NormalizedConstraint *Constraint,
+ const NamedDecl *ConstraintDecl)
+ : FoldExpanded{llvm::to_underlying(ConstraintKind::FoldExpanded),
+ llvm::to_underlying(OpKind),
+ /*Placeholder=*/0,
+ /*Indexes=*/{},
+ /*IndexesForSubsumption=*/{},
+ /*Args=*/nullptr,
+ /*ParamList=*/nullptr,
+ Pattern,
+ ConstraintDecl,
+ Constraint} {}
+
+ NormalizedConstraint(const ConceptReference *ConceptId,
+ const NamedDecl *ConstraintDecl,
+ NormalizedConstraint *SubConstraint,
+ const ConceptSpecializationExpr *CSE,
+ UnsignedOrNone PackIndex)
+ : ConceptId{{llvm::to_underlying(ConstraintKind::ConceptId),
+ /*Placeholder=*/0, PackIndex.toInternalRepresentation(),
+ /*Indexes=*/{},
+ /*IndexesForSubsumption=*/{},
+ /*Args=*/nullptr, /*ParamList=*/nullptr, ConceptId,
+ ConstraintDecl},
+ SubConstraint,
+ CSE} {}
+
+ NormalizedConstraint(NormalizedConstraint *LHS, CompoundConstraintKind CCK,
+ NormalizedConstraint *RHS)
+ : Compound{llvm::to_underlying(ConstraintKind::Compound),
+ llvm::to_underlying(CCK), LHS, RHS} {}
+
+ bool hasParameterMapping() const {
+ // compound constraints do not have a mapping
+ // and Args is not part of their common initial sequence.
+ return getKind() != ConstraintKind::Compound && Atomic.Args != nullptr;
+ }
+
+ const OccurenceList &mappingOccurenceList() const {
+ assert(hasParameterMapping() && "This constraint has no parameter mapping");
+ return Atomic.Indexes;
+ }
+
+ const OccurenceList &mappingOccurenceListForSubsumption() const {
+ assert(hasParameterMapping() && "This constraint has no parameter mapping");
+ return Atomic.IndexesForSubsumption;
+ }
-struct alignas(ConstraintAlignment) AtomicConstraint {
- const Expr *ConstraintExpr;
- const NamedDecl *ConstraintDecl;
- std::optional<ArrayRef<TemplateArgumentLoc>> ParameterMapping;
+ llvm::MutableArrayRef<TemplateArgumentLoc> getParameterMapping() const {
+ return {Atomic.Args, Atomic.Indexes.count()};
+ }
+
+ TemplateParameterList *getUsedTemplateParamList() const {
+ return Atomic.ParamList;
+ }
- AtomicConstraint(const Expr *ConstraintExpr, const NamedDecl *ConstraintDecl)
- : ConstraintExpr(ConstraintExpr), ConstraintDecl(ConstraintDecl) {};
+ void updateParameterMapping(OccurenceList Indexes,
+ OccurenceList IndexesForSubsumption,
+ llvm::MutableArrayRef<TemplateArgumentLoc> Args,
+ TemplateParameterList *ParamList) {
+ assert(getKind() != ConstraintKind::Compound);
+ assert(Indexes.count() == Args.size());
+ assert(IndexesForSubsumption.size() == Indexes.size());
+ assert((Indexes | IndexesForSubsumption) == Indexes);
+
+ Atomic.IndexesForSubsumption = std::move(IndexesForSubsumption);
+ Atomic.Indexes = std::move(Indexes);
+ Atomic.Args = Args.data();
+ Atomic.ParamList = ParamList;
+ }
bool hasMatchingParameterMapping(ASTContext &C,
- const AtomicConstraint &Other) const {
- if (!ParameterMapping != !Other.ParameterMapping)
+ const NormalizedConstraint &Other) const {
+ assert(getKind() != ConstraintKind::Compound);
+
+ if (hasParameterMapping() != Other.hasParameterMapping())
return false;
- if (!ParameterMapping)
+ if (!hasParameterMapping())
return true;
- if (ParameterMapping->size() != Other.ParameterMapping->size())
- return false;
- for (unsigned I = 0, S = ParameterMapping->size(); I < S; ++I) {
+ llvm::ArrayRef<TemplateArgumentLoc> ParameterMapping =
+ getParameterMapping();
+ llvm::ArrayRef<TemplateArgumentLoc> OtherParameterMapping =
+ Other.getParameterMapping();
+
+ const OccurenceList &Indexes = mappingOccurenceListForSubsumption();
+ const OccurenceList &OtherIndexes =
+ Other.mappingOccurenceListForSubsumption();
+
+ if (ParameterMapping.size() != OtherParameterMapping.size())
+ return false;
+ for (unsigned I = 0, S = ParameterMapping.size(); I < S; ++I) {
+ if (Indexes[I] != OtherIndexes[I])
+ return false;
+ if (!Indexes[I])
+ continue;
llvm::FoldingSetNodeID IDA, IDB;
- C.getCanonicalTemplateArgument((*ParameterMapping)[I].getArgument())
+ C.getCanonicalTemplateArgument(ParameterMapping[I].getArgument())
.Profile(IDA, C);
- C.getCanonicalTemplateArgument((*Other.ParameterMapping)[I].getArgument())
+ C.getCanonicalTemplateArgument(OtherParameterMapping[I].getArgument())
.Profile(IDB, C);
if (IDA != IDB)
return false;
}
return true;
}
-};
-struct alignas(ConstraintAlignment) NormalizedConstraintPair;
-struct alignas(ConstraintAlignment) FoldExpandedConstraint;
+public:
+ ConstraintKind getKind() const {
+ return static_cast<ConstraintKind>(Atomic.Kind);
+ }
-/// \brief A normalized constraint, as defined in C++ [temp.constr.normal], is
-/// either an atomic constraint, a conjunction of normalized constraints or a
-/// disjunction of normalized constraints.
-struct NormalizedConstraint {
+ SourceLocation getBeginLoc() const {
+ switch (getKind()) {
+ case ConstraintKind::Atomic:
+ return cast<const Expr *>(Atomic.ConstraintExpr)->getBeginLoc();
+ case ConstraintKind::ConceptId:
+ return cast<const ConceptReference *>(Atomic.ConstraintExpr)
+ ->getBeginLoc();
+ case ConstraintKind::Compound:
+ return Compound.LHS->getBeginLoc();
+ case ConstraintKind::FoldExpanded:
+ return FoldExpanded.Pattern->getBeginLoc();
+ }
+ }
+
+ SourceLocation getEndLoc() const {
+ switch (getKind()) {
+ case ConstraintKind::Atomic:
+ return cast<const Expr *>(Atomic.ConstraintExpr)->getEndLoc();
+ case ConstraintKind::ConceptId:
+ return cast<const ConceptReference *>(Atomic.ConstraintExpr)->getEndLoc();
+ case ConstraintKind::Compound:
+ return Compound.RHS->getEndLoc();
+ case ConstraintKind::FoldExpanded:
+ return FoldExpanded.Pattern->getEndLoc();
+ }
+ }
+
+ SourceRange getSourceRange() const { return {getBeginLoc(), getEndLoc()}; }
+
+private:
friend class Sema;
+ static NormalizedConstraint *
+ fromAssociatedConstraints(Sema &S, const NamedDecl *D,
+ ArrayRef<AssociatedConstraint> ACs);
+ static NormalizedConstraint *fromConstraintExpr(Sema &S, const NamedDecl *D,
+ const Expr *E,
+ UnsignedOrNone SubstIndex);
+};
+
+class CompoundConstraint : public NormalizedConstraint {
+ using NormalizedConstraint::NormalizedConstraint;
- enum CompoundConstraintKind { CCK_Conjunction, CCK_Disjunction };
+public:
+ static CompoundConstraint *Create(ASTContext &Ctx, NormalizedConstraint *LHS,
+ CompoundConstraintKind CCK,
+ NormalizedConstraint *RHS) {
+ return new (Ctx) CompoundConstraint(LHS, CCK, RHS);
+ }
- using CompoundConstraint = llvm::PointerIntPair<NormalizedConstraintPair *, 1,
- CompoundConstraintKind>;
+ static CompoundConstraint *CreateConjunction(ASTContext &Ctx,
+ NormalizedConstraint *LHS,
+ NormalizedConstraint *RHS) {
+ return new (Ctx) CompoundConstraint(LHS, CCK_Conjunction, RHS);
+ }
- llvm::PointerUnion<AtomicConstraint *, FoldExpandedConstraint *,
- CompoundConstraint>
- Constraint;
+ const NormalizedConstraint &getLHS() const { return *Compound.LHS; }
- NormalizedConstraint(AtomicConstraint *C): Constraint{C} { };
- NormalizedConstraint(FoldExpandedConstraint *C) : Constraint{C} {};
+ NormalizedConstraint &getLHS() { return *Compound.LHS; }
- NormalizedConstraint(ASTContext &C, NormalizedConstraint LHS,
- NormalizedConstraint RHS, CompoundConstraintKind Kind);
+ const NormalizedConstraint &getRHS() const { return *Compound.RHS; }
- NormalizedConstraint(ASTContext &C, const NormalizedConstraint &Other);
- NormalizedConstraint(NormalizedConstraint &&Other):
- Constraint(Other.Constraint) {
- Other.Constraint = nullptr;
+ NormalizedConstraint &getRHS() { return *Compound.RHS; }
+
+ CompoundConstraintKind getCompoundKind() const {
+ return static_cast<CompoundConstraintKind>(Compound.CCK);
}
- NormalizedConstraint &operator=(const NormalizedConstraint &Other) = delete;
- NormalizedConstraint &operator=(NormalizedConstraint &&Other) {
- if (&Other != this) {
- NormalizedConstraint Temp(std::move(Other));
- std::swap(Constraint, Temp.Constraint);
- }
- return *this;
+};
+
+class NormalizedConstraintWithParamMapping : public NormalizedConstraint {
+protected:
+ using NormalizedConstraint::NormalizedConstraint;
+
+public:
+ using NormalizedConstraint::getParameterMapping;
+ using NormalizedConstraint::getUsedTemplateParamList;
+ using NormalizedConstraint::hasMatchingParameterMapping;
+ using NormalizedConstraint::hasParameterMapping;
+ using NormalizedConstraint::mappingOccurenceList;
+ using NormalizedConstraint::mappingOccurenceListForSubsumption;
+ using NormalizedConstraint::updateParameterMapping;
+
+ const NamedDecl *getConstraintDecl() const { return Atomic.ConstraintDecl; }
+
+ UnsignedOrNone getPackSubstitutionIndex() const {
+ return UnsignedOrNone::fromInternalRepresentation(
+ Atomic.PackSubstitutionIndex);
}
+};
+
+class AtomicConstraint : public NormalizedConstraintWithParamMapping {
+ using NormalizedConstraintWithParamMapping::
+ NormalizedConstraintWithParamMapping;
- bool isAtomic() const { return llvm::isa<AtomicConstraint *>(Constraint); }
- bool isFoldExpanded() const {
- return llvm::isa<FoldExpandedConstraint *>(Constraint);
+public:
+ static AtomicConstraint *Create(ASTContext &Ctx, const Expr *ConstraintExpr,
+ const NamedDecl *ConstraintDecl,
+ UnsignedOrNone PackIndex) {
+ return new (Ctx)
+ AtomicConstraint(ConstraintExpr, ConstraintDecl, PackIndex);
}
- bool isCompound() const { return llvm::isa<CompoundConstraint>(Constraint); }
- CompoundConstraintKind getCompoundKind() const;
+ const Expr *getConstraintExpr() const {
+ return cast<const Expr *>(Atomic.ConstraintExpr);
+ }
+};
- NormalizedConstraint &getLHS() const;
- NormalizedConstraint &getRHS() const;
+class FoldExpandedConstraint : public NormalizedConstraintWithParamMapping {
+ using NormalizedConstraintWithParamMapping::
+ NormalizedConstraintWithParamMapping;
- AtomicConstraint *getAtomicConstraint() const;
+public:
+ static FoldExpandedConstraint *Create(ASTContext &Ctx, const Expr *Pattern,
+ const NamedDecl *ConstraintDecl,
+ FoldOperatorKind OpKind,
+ NormalizedConstraint *Constraint) {
+ return new (Ctx)
+ FoldExpandedConstraint(Pattern, OpKind, Constraint, ConstraintDecl);
+ }
- FoldExpandedConstraint *getFoldExpandedConstraint() const;
+ using NormalizedConstraint::hasMatchingParameterMapping;
-private:
- static std::optional<NormalizedConstraint>
- fromAssociatedConstraints(Sema &S, const NamedDecl *D,
- ArrayRef<AssociatedConstraint> ACs);
- static std::optional<NormalizedConstraint>
- fromConstraintExpr(Sema &S, const NamedDecl *D, const Expr *E);
-};
+ FoldOperatorKind getFoldOperator() const {
+ return static_cast<FoldOperatorKind>(FoldExpanded.FoldOperator);
+ }
-struct alignas(ConstraintAlignment) NormalizedConstraintPair {
- NormalizedConstraint LHS, RHS;
-};
+ const Expr *getPattern() const { return FoldExpanded.Pattern; }
-struct alignas(ConstraintAlignment) FoldExpandedConstraint {
- enum class FoldOperatorKind { And, Or } Kind;
- NormalizedConstraint Constraint;
- const Expr *Pattern;
+ const NormalizedConstraint &getNormalizedPattern() const {
+ return *FoldExpanded.Constraint;
+ }
- FoldExpandedConstraint(FoldOperatorKind K, NormalizedConstraint C,
- const Expr *Pattern)
- : Kind(K), Constraint(std::move(C)), Pattern(Pattern) {};
+ NormalizedConstraint &getNormalizedPattern() {
+ return *FoldExpanded.Constraint;
+ }
static bool AreCompatibleForSubsumption(const FoldExpandedConstraint &A,
const FoldExpandedConstraint &B);
};
-const NormalizedConstraint *getNormalizedAssociatedConstraints(
- Sema &S, const NamedDecl *ConstrainedDecl,
- ArrayRef<AssociatedConstraint> AssociatedConstraints);
+class ConceptIdConstraint : public NormalizedConstraintWithParamMapping {
+ using NormalizedConstraintWithParamMapping::
+ NormalizedConstraintWithParamMapping;
+
+public:
+ static ConceptIdConstraint *
+ Create(ASTContext &Ctx, const ConceptReference *ConceptId,
+ NormalizedConstraint *SubConstraint, const NamedDecl *ConstraintDecl,
+ const ConceptSpecializationExpr *CSE, UnsignedOrNone PackIndex) {
+ return new (Ctx) ConceptIdConstraint(ConceptId, ConstraintDecl,
+ SubConstraint, CSE, PackIndex);
+ }
+
+ const ConceptSpecializationExpr *getConceptSpecializationExpr() const {
+ return ConceptId.CSE;
+ }
+
+ const ConceptReference *getConceptId() const {
+ return cast<const ConceptReference *>(ConceptId.ConstraintExpr);
+ }
+
+ const NormalizedConstraint &getNormalizedConstraint() const {
+ return *ConceptId.Sub;
+ }
+
+ NormalizedConstraint &getNormalizedConstraint() { return *ConceptId.Sub; }
+};
+
+struct UnsubstitutedConstraintSatisfactionCacheResult {
+ ExprResult SubstExpr;
+ ConstraintSatisfaction Satisfaction;
+};
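
The hunks above replace the old PointerUnion-based NormalizedConstraint with a kind-tagged class hierarchy whose nodes are arena-allocated on the ASTContext via placement new and never individually destroyed. A minimal, self-contained sketch of that pattern follows; all names here are hypothetical, and unlike the patch (which keeps all storage in union members of the base), the derived node in this sketch carries its own operands:

    #include <cstddef>

    // Toy bump allocator standing in for ASTContext's arena.
    struct Arena {
      char Buf[4096];
      std::size_t Off = 0;
      void *Allocate(std::size_t Size) {
        void *P = Buf + Off;
        Off += (Size + 7) & ~std::size_t(7); // keep 8-byte alignment
        return P;
      }
    };
    void *operator new(std::size_t Size, Arena &A) { return A.Allocate(Size); }

    class Constraint {
    public:
      enum class Kind { Atomic, Compound };
      Kind getKind() const { return K; }

    protected:
      Constraint(Kind K) : K(K) {}

    private:
      Kind K;
    };

    // Derived nodes expose factories instead of public constructors, mirroring
    // CompoundConstraint::Create / AtomicConstraint::Create above.
    class Conjunction : public Constraint {
      Constraint *LHS, *RHS;
      Conjunction(Constraint *L, Constraint *R)
          : Constraint(Kind::Compound), LHS(L), RHS(R) {}

    public:
      static Conjunction *Create(Arena &A, Constraint *L, Constraint *R) {
        return new (A) Conjunction(L, R);
      }
      const Constraint &getLHS() const { return *LHS; }
      const Constraint &getRHS() const { return *RHS; }
    };

    int main() {
      Arena A;
      Constraint *C = Conjunction::Create(A, nullptr, nullptr);
      return C->getKind() == Constraint::Kind::Compound ? 0 : 1;
    }
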
/// \brief SubsumptionChecker establishes subsumption
/// between two sets of constraints.
@@ -189,13 +465,13 @@ private:
};
struct MappedAtomicConstraint {
- AtomicConstraint *Constraint;
+ const AtomicConstraint *Constraint;
Literal ID;
};
struct FoldExpendedConstraintKey {
FoldExpandedConstraint::FoldOperatorKind Kind;
- AtomicConstraint *Constraint;
+ const AtomicConstraint *Constraint;
Literal ID;
};
@@ -207,7 +483,7 @@ private:
// A map from a literal to a corresponding associated constraint.
// We do not have enough bits left for a pointer union here :(
- llvm::DenseMap<uint16_t, void *> ReverseMap;
+ llvm::DenseMap<uint16_t, const void *> ReverseMap;
// Fold expanded constraints ask us to recursively establish subsumption.
// This caches the result.
@@ -234,12 +510,12 @@ private:
FormulaType Normalize(const NormalizedConstraint &C);
void AddUniqueClauseToFormula(Formula &F, Clause C);
- Literal find(AtomicConstraint *);
- Literal find(FoldExpandedConstraint *);
+ Literal find(const AtomicConstraint *);
+ Literal find(const FoldExpandedConstraint *);
uint16_t getNewLiteralId();
};
-} // clang
+} // namespace clang
#endif // LLVM_CLANG_SEMA_SEMACONCEPT_H
diff --git a/clang/include/clang/Sema/Template.h b/clang/include/clang/Sema/Template.h
index 115c19d..60c7d27 100644
--- a/clang/include/clang/Sema/Template.h
+++ b/clang/include/clang/Sema/Template.h
@@ -234,21 +234,25 @@ enum class TemplateSubstitutionKind : char {
/// Replaces the current 'innermost' level with the provided argument list.
/// This is useful for type deduction cases where we need to get the entire
/// list from the AST, but then add the deduced innermost list.
- void replaceInnermostTemplateArguments(Decl *AssociatedDecl, ArgList Args) {
+ void replaceInnermostTemplateArguments(Decl *AssociatedDecl, ArgList Args,
+ bool Final = false) {
assert((!TemplateArgumentLists.empty() || NumRetainedOuterLevels) &&
"Replacing in an empty list?");
if (!TemplateArgumentLists.empty()) {
- assert((TemplateArgumentLists[0].AssociatedDeclAndFinal.getPointer() ||
- TemplateArgumentLists[0].AssociatedDeclAndFinal.getPointer() ==
- AssociatedDecl) &&
- "Trying to change incorrect declaration?");
TemplateArgumentLists[0].Args = Args;
- } else {
- --NumRetainedOuterLevels;
- TemplateArgumentLists.push_back(
- {{AssociatedDecl, /*Final=*/false}, Args});
+ return;
}
+ --NumRetainedOuterLevels;
+ TemplateArgumentLists.push_back(
+ {{AssociatedDecl, /*Final=*/Final}, Args});
+ }
+
+ void replaceOutermostTemplateArguments(Decl *AssociatedDecl, ArgList Args) {
+ assert((!TemplateArgumentLists.empty()) && "Replacing in an empty list?");
+ TemplateArgumentLists.back().AssociatedDeclAndFinal.setPointer(
+ AssociatedDecl);
+ TemplateArgumentLists.back().Args = Args;
}
/// Add an outermost level that we are not substituting. We have no
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index 5dcf03f7..c233ca1 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -1414,7 +1414,7 @@ class CallEventManager {
}
public:
- CallEventManager(llvm::BumpPtrAllocator &alloc) : Alloc(alloc) {}
+ CallEventManager(llvm::BumpPtrAllocator &alloc);
/// Gets an outside caller given a callee context.
CallEventRef<> getCaller(const StackFrameContext *CalleeCtx,
diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
index c3601a4..f222ded 100644
--- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
+++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
@@ -220,7 +220,6 @@ private:
std::vector<std::string> VisibleModules;
std::vector<Command> Commands;
std::string ContextHash;
- std::vector<std::string> OutputPaths;
const llvm::DenseSet<ModuleID> &AlreadySeen;
};
diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp
index 7173c2a..2e1c8eb 100644
--- a/clang/lib/AST/APValue.cpp
+++ b/clang/lib/AST/APValue.cpp
@@ -784,7 +784,7 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
if (!O.isZero()) {
if (IsReference)
Out << "*(";
- if (S.isZero() || O % S) {
+ if (S.isZero() || !O.isMultipleOf(S)) {
Out << "(char*)";
S = CharUnits::One();
}
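
This commit repeatedly swaps raw remainder tests on CharUnits for isMultipleOf (here, and below in RecordLayoutBuilder.cpp, the CIR record lowering, and CGAtomic.cpp), which states the intent directly. A simplified model of the predicate, for illustration only:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for clang::CharUnits, for illustration only.
    struct CharUnits {
      std::int64_t Quantity;
      bool isZero() const { return Quantity == 0; }
      // True when this quantity is an exact multiple of N (N nonzero).
      bool isMultipleOf(CharUnits N) const { return Quantity % N.Quantity == 0; }
    };

    int main() {
      CharUnits Offset{24}, Align{8}, Odd{5};
      assert(Offset.isMultipleOf(Align)); // 24 % 8 == 0
      assert(!Offset.isMultipleOf(Odd));  // 24 % 5 != 0
    }
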
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp
index d658890..fd12bc4 100644
--- a/clang/lib/AST/ASTConcept.cpp
+++ b/clang/lib/AST/ASTConcept.cpp
@@ -24,13 +24,18 @@ static void
CreateUnsatisfiedConstraintRecord(const ASTContext &C,
const UnsatisfiedConstraintRecord &Detail,
UnsatisfiedConstraintRecord *TrailingObject) {
- if (auto *E = dyn_cast<Expr *>(Detail))
+ if (Detail.isNull())
+ new (TrailingObject) UnsatisfiedConstraintRecord(nullptr);
+ else if (const auto *E = llvm::dyn_cast<const Expr *>(Detail))
new (TrailingObject) UnsatisfiedConstraintRecord(E);
+ else if (const auto *Concept =
+ llvm::dyn_cast<const ConceptReference *>(Detail))
+ new (TrailingObject) UnsatisfiedConstraintRecord(Concept);
else {
auto &SubstitutionDiagnostic =
- *cast<std::pair<SourceLocation, StringRef> *>(Detail);
+ *cast<const clang::ConstraintSubstitutionDiagnostic *>(Detail);
StringRef Message = C.backupStr(SubstitutionDiagnostic.second);
- auto *NewSubstDiag = new (C) std::pair<SourceLocation, StringRef>(
+ auto *NewSubstDiag = new (C) clang::ConstraintSubstitutionDiagnostic(
SubstitutionDiagnostic.first, Message);
new (TrailingObject) UnsatisfiedConstraintRecord(NewSubstDiag);
}
@@ -74,9 +79,10 @@ ASTConstraintSatisfaction *ASTConstraintSatisfaction::Rebuild(
return new (Mem) ASTConstraintSatisfaction(C, Satisfaction);
}
-void ConstraintSatisfaction::Profile(
- llvm::FoldingSetNodeID &ID, const ASTContext &C,
- const NamedDecl *ConstraintOwner, ArrayRef<TemplateArgument> TemplateArgs) {
+void ConstraintSatisfaction::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &C,
+ const NamedDecl *ConstraintOwner,
+ ArrayRef<TemplateArgument> TemplateArgs) {
ID.AddPointer(ConstraintOwner);
ID.AddInteger(TemplateArgs.size());
for (auto &Arg : TemplateArgs)
@@ -116,6 +122,19 @@ void ConceptReference::print(llvm::raw_ostream &OS,
}
}
+const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
+ const ConceptReference *C) {
+ std::string NameStr;
+ llvm::raw_string_ostream OS(NameStr);
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ OS << '\'';
+ C->print(OS, PrintingPolicy(LO));
+ OS << '\'';
+ return DB << NameStr;
+}
+
concepts::ExprRequirement::ExprRequirement(
Expr *E, bool IsSimple, SourceLocation NoexceptLoc,
ReturnTypeRequirement Req, SatisfactionStatus Status,
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 1c8fd83..f43fa8c 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -1069,22 +1069,22 @@ Error ASTNodeImporter::ImportConstraintSatisfaction(
ToSat.ContainsErrors = FromSat.ContainsErrors;
if (!ToSat.IsSatisfied) {
for (auto Record = FromSat.begin(); Record != FromSat.end(); ++Record) {
- if (Expr *E = Record->dyn_cast<Expr *>()) {
+ if (const Expr *E = Record->dyn_cast<const Expr *>()) {
ExpectedExpr ToSecondExpr = import(E);
if (!ToSecondExpr)
return ToSecondExpr.takeError();
ToSat.Details.emplace_back(ToSecondExpr.get());
} else {
- auto Pair = Record->dyn_cast<std::pair<SourceLocation, StringRef> *>();
+ auto Pair =
+ Record->dyn_cast<const ConstraintSubstitutionDiagnostic *>();
ExpectedSLoc ToPairFirst = import(Pair->first);
if (!ToPairFirst)
return ToPairFirst.takeError();
StringRef ToPairSecond = ImportASTStringRef(Pair->second);
- ToSat.Details.emplace_back(
- new (Importer.getToContext())
- ConstraintSatisfaction::SubstitutionDiagnostic{
- ToPairFirst.get(), ToPairSecond});
+ ToSat.Details.emplace_back(new (Importer.getToContext())
+ ConstraintSubstitutionDiagnostic{
+ ToPairFirst.get(), ToPairSecond});
}
}
}
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 0b7b6cd..c71fd22 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -540,7 +540,8 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
if (const auto *IL = dyn_cast<IntegerLiteral>(SubExpr)) {
if (ToT != PT_IntAP && ToT != PT_IntAPS && FromT != PT_IntAP &&
FromT != PT_IntAPS && !CE->getType()->isEnumeralType())
- return this->emitConst(IL->getValue(), CE);
+ return this->emitConst(APSInt(IL->getValue(), !isSignedType(*FromT)),
+ CE);
if (!this->emitConst(IL->getValue(), SubExpr))
return false;
} else {
@@ -4541,7 +4542,14 @@ bool Compiler<Emitter>::emitConst(T Value, const Expr *E) {
template <class Emitter>
bool Compiler<Emitter>::emitConst(const APSInt &Value, PrimType Ty,
const Expr *E) {
- return this->emitConst(static_cast<const APInt &>(Value), Ty, E);
+ if (Ty == PT_IntAPS)
+ return this->emitConstIntAPS(Value, E);
+ if (Ty == PT_IntAP)
+ return this->emitConstIntAP(Value, E);
+
+ if (Value.isSigned())
+ return this->emitConst(Value.getSExtValue(), Ty, E);
+ return this->emitConst(Value.getZExtValue(), Ty, E);
}
template <class Emitter>
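
The new dispatch matters because sign affects how an APSInt narrows to a host integer: reading a negative signed value with getZExtValue would change it. A small standalone check against LLVM's ADT types:

    #include <cassert>
    #include "llvm/ADT/APSInt.h"

    int main() {
      // The 8-bit pattern 0xFF is -1 when signed, 255 when unsigned.
      llvm::APSInt S(llvm::APInt(8, 0xFF), /*isUnsigned=*/false);
      llvm::APSInt U(llvm::APInt(8, 0xFF), /*isUnsigned=*/true);
      assert(S.getSExtValue() == -1);
      assert(U.getZExtValue() == 255);
    }
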
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index cd8e495..c734155 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -3552,6 +3552,53 @@ void FunctionDecl::setIsTypeAwareOperatorNewOrDelete(bool IsTypeAware) {
getASTContext().setIsTypeAwareOperatorNewOrDelete(this, IsTypeAware);
}
+UsualDeleteParams FunctionDecl::getUsualDeleteParams() const {
+ UsualDeleteParams Params;
+
+ // This function should only be called for operator delete declarations.
+ assert(getDeclName().isAnyOperatorDelete());
+ if (!getDeclName().isAnyOperatorDelete())
+ return Params;
+
+ const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>();
+ auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
+
+ if (isTypeAwareOperatorNewOrDelete()) {
+ Params.TypeAwareDelete = TypeAwareAllocationMode::Yes;
+ assert(AI != AE);
+ ++AI;
+ }
+
+ // The first argument after the type-identity parameter (if any) is
+ // always a void* (or C* for a destroying operator delete for class
+ // type C).
+ ++AI;
+
+ // The next parameter may be a std::destroying_delete_t.
+ if (isDestroyingOperatorDelete()) {
+ assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
+ Params.DestroyingDelete = true;
+ assert(AI != AE);
+ ++AI;
+ }
+
+ // Figure out what other parameters we should be implicitly passing.
+ if (AI != AE && (*AI)->isIntegerType()) {
+ Params.Size = true;
+ ++AI;
+ } else
+ assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
+
+ if (AI != AE && (*AI)->isAlignValT()) {
+ Params.Alignment = AlignedAllocationMode::Yes;
+ ++AI;
+ } else
+ assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
+
+ assert(AI == AE && "unexpected usual deallocation function parameter");
+ return Params;
+}
+
LanguageLinkage FunctionDecl::getLanguageLinkage() const {
return getDeclLanguageLinkage(*this);
}
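
For reference, this is how the parameter walk in getUsualDeleteParams above would classify a few usual deallocation functions; the annotations are illustrative, not exhaustive (destroying delete requires C++20):

    #include <cstddef>
    #include <new>

    // Each declaration is annotated with the UsualDeleteParams flags the walk
    // above would derive for it.
    void operator delete(void *) noexcept;                   // (none)
    void operator delete(void *, std::size_t) noexcept;      // Size
    void operator delete(void *, std::align_val_t) noexcept; // Alignment
    void operator delete(void *, std::size_t,
                         std::align_val_t) noexcept;         // Size + Alignment

    struct C {
      // The first parameter is C* for a destroying operator delete.
      void operator delete(C *, std::destroying_delete_t) noexcept; // Destroying
    };
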
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 2173aed..844db79 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -4624,6 +4624,8 @@ void CXXNameMangler::mangleType(const HLSLAttributedResourceType *T) {
Str += "_ROV";
if (Attrs.RawBuffer)
Str += "_Raw";
+ if (Attrs.IsCounter)
+ Str += "_Counter";
if (T->hasContainedType())
Str += "_CT";
mangleVendorQualifier(Str);
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
index 43f4e07..00b938b 100644
--- a/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -2087,9 +2087,8 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
if (InsertExtraPadding) {
CharUnits ASanAlignment = CharUnits::fromQuantity(8);
CharUnits ExtraSizeForAsan = ASanAlignment;
- if (FieldSize % ASanAlignment)
- ExtraSizeForAsan +=
- ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment);
+ if (!FieldSize.isMultipleOf(ASanAlignment))
+ ExtraSizeForAsan += ASanAlignment - (FieldSize % ASanAlignment);
EffectiveFieldSize = FieldSize = FieldSize + ExtraSizeForAsan;
}
@@ -2119,10 +2118,10 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
if (RD->hasAttr<PackedAttr>() || !MaxFieldAlignment.isZero())
if (FieldAlign < OriginalFieldAlign)
if (D->getType()->isRecordType()) {
- // If the offset is a multiple of the alignment of
+ // If the offset is not a multiple of the alignment of
// the type, raise the warning.
// TODO: Takes no account of the alignment of the outer struct
- if (FieldOffset % OriginalFieldAlign != 0)
+ if (!FieldOffset.isMultipleOf(OriginalFieldAlign))
Diag(D->getLocation(), diag::warn_unaligned_access)
<< Context.getCanonicalTagType(RD) << D->getName()
<< D->getType();
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 589a156..f3b5478 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -2655,8 +2655,6 @@ void OpenACCClauseProfiler::VisitPrivateClause(
for (auto &Recipe : Clause.getInitRecipes()) {
Profiler.VisitDecl(Recipe.AllocaDecl);
- if (Recipe.InitExpr)
- Profiler.VisitExpr(Recipe.InitExpr);
}
}
@@ -2666,8 +2664,6 @@ void OpenACCClauseProfiler::VisitFirstPrivateClause(
for (auto &Recipe : Clause.getInitRecipes()) {
Profiler.VisitDecl(Recipe.AllocaDecl);
- if (Recipe.InitExpr)
- Profiler.VisitExpr(Recipe.InitExpr);
Profiler.VisitDecl(Recipe.InitFromTemporary);
}
}
@@ -2773,12 +2769,10 @@ void OpenACCClauseProfiler::VisitReductionClause(
for (auto &Recipe : Clause.getRecipes()) {
Profiler.VisitDecl(Recipe.AllocaDecl);
- if (Recipe.InitExpr)
- Profiler.VisitExpr(Recipe.InitExpr);
// TODO: OpenACC: Make sure we remember to update this when we figure out
// what we're adding for the operation recipe; in the meantime, a static
// assert will make sure we don't add something.
- static_assert(sizeof(OpenACCReductionRecipe) == 2 * sizeof(int *));
+ static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
}
}
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 8f7fe3b..cf5e914 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -3095,6 +3095,9 @@ void TextNodeDumper::VisitHLSLRootSignatureDecl(
case llvm::dxbc::RootSignatureVersion::V1_1:
OS << "1.1";
break;
+ case llvm::dxbc::RootSignatureVersion::V1_2:
+ OS << "1.2";
+ break;
}
OS << ", ";
llvm::hlsl::rootsig::dumpRootElements(OS, D->getRootElements());
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index f3448af..66a1b68 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -2062,6 +2062,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::HLSLROV:
case attr::HLSLRawBuffer:
case attr::HLSLContainedType:
+ case attr::HLSLIsCounter:
llvm_unreachable("HLSL resource type attributes handled separately");
case attr::OpenCLPrivateAddressSpace:
@@ -2210,6 +2211,8 @@ void TypePrinter::printHLSLAttributedResourceAfter(
OS << " [[hlsl::is_rov]]";
if (Attrs.RawBuffer)
OS << " [[hlsl::raw_buffer]]";
+ if (Attrs.IsCounter)
+ OS << " [[hlsl::is_counter]]";
QualType ContainedTy = T->getContainedType();
if (!ContainedTy.isNull()) {
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index 60a2d11..cdde849 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -4516,10 +4516,13 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
//
// Note: We add a successor to a switch that is considered covered yet has no
// case statements if the enumeration has no enumerators.
+  // We also consider this successor reachable if
+  // BuildOpts.AssumeReachableDefaultInSwitchStatements is true.
bool SwitchAlwaysHasSuccessor = false;
SwitchAlwaysHasSuccessor |= switchExclusivelyCovered;
- SwitchAlwaysHasSuccessor |= Terminator->isAllEnumCasesCovered() &&
- Terminator->getSwitchCaseList();
+ SwitchAlwaysHasSuccessor |=
+ !BuildOpts.AssumeReachableDefaultInSwitchStatements &&
+ Terminator->isAllEnumCasesCovered() && Terminator->getSwitchCaseList();
addSuccessor(SwitchTerminatedBlock, DefaultCaseBlock,
!SwitchAlwaysHasSuccessor);
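
A sketch of what the new option changes, assuming AssumeReachableDefaultInSwitchStatements is enabled; the example itself is not from the patch:

    enum Color { Red, Green, Blue };

    int classify(Color C) {
      switch (C) { // every enumerator is covered
      case Red:
        return 0;
      case Green:
        return 1;
      case Blue:
        return 2;
      }
      // Previously treated as unreachable because the switch is "covered".
      // With the option enabled, the CFG keeps an edge to this point, since C
      // may hold a value outside the declared enumerators.
      return -1;
    }
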
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index d19f86a..a56fdb1 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -419,22 +419,28 @@ public:
// The expression for this variable, OR
const Expr *Exp = nullptr;
- // Reference to another VarDefinition
- unsigned Ref = 0;
+ // Direct reference to another VarDefinition
+ unsigned DirectRef = 0;
+
+ // Reference to underlying canonical non-reference VarDefinition.
+ unsigned CanonicalRef = 0;
// The map with which Exp should be interpreted.
Context Ctx;
bool isReference() const { return !Exp; }
+ void invalidateRef() { DirectRef = CanonicalRef = 0; }
+
private:
// Create ordinary variable definition
VarDefinition(const NamedDecl *D, const Expr *E, Context C)
: Dec(D), Exp(E), Ctx(C) {}
// Create reference to previous definition
- VarDefinition(const NamedDecl *D, unsigned R, Context C)
- : Dec(D), Ref(R), Ctx(C) {}
+ VarDefinition(const NamedDecl *D, unsigned DirectRef, unsigned CanonicalRef,
+ Context C)
+ : Dec(D), DirectRef(DirectRef), CanonicalRef(CanonicalRef), Ctx(C) {}
};
private:
@@ -445,7 +451,7 @@ private:
public:
LocalVariableMap() {
// index 0 is a placeholder for undefined variables (aka phi-nodes).
- VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
+ VarDefinitions.push_back(VarDefinition(nullptr, 0, 0, getEmptyContext()));
}
/// Look up a definition, within the given context.
@@ -471,7 +477,7 @@ public:
Ctx = VarDefinitions[i].Ctx;
return VarDefinitions[i].Exp;
}
- i = VarDefinitions[i].Ref;
+ i = VarDefinitions[i].DirectRef;
}
return nullptr;
}
@@ -508,7 +514,7 @@ public:
void dump() {
for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
const Expr *Exp = VarDefinitions[i].Exp;
- unsigned Ref = VarDefinitions[i].Ref;
+ unsigned Ref = VarDefinitions[i].DirectRef;
dumpVarDefinitionName(i);
llvm::errs() << " = ";
@@ -539,9 +545,9 @@ protected:
friend class VarMapBuilder;
// Resolve any definition ID down to its non-reference base ID.
- unsigned getCanonicalDefinitionID(unsigned ID) {
+ unsigned getCanonicalDefinitionID(unsigned ID) const {
while (ID > 0 && VarDefinitions[ID].isReference())
- ID = VarDefinitions[ID].Ref;
+ ID = VarDefinitions[ID].CanonicalRef;
return ID;
}
@@ -564,10 +570,11 @@ protected:
}
// Add a new reference to an existing definition.
- Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
+ Context addReference(const NamedDecl *D, unsigned Ref, Context Ctx) {
unsigned newID = VarDefinitions.size();
Context NewCtx = ContextFactory.add(Ctx, D, newID);
- VarDefinitions.push_back(VarDefinition(D, i, Ctx));
+ VarDefinitions.push_back(
+ VarDefinition(D, Ref, getCanonicalDefinitionID(Ref), Ctx));
return NewCtx;
}
@@ -769,15 +776,14 @@ void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
const unsigned *I2 = C2.lookup(P.first);
if (!I2) {
// Variable does not exist at the end of the loop; invalidate.
- VDef->Ref = 0;
+ VDef->invalidateRef();
continue;
}
// Compare the canonical IDs. This correctly handles chains of references
// and determines if the variable is truly loop-invariant.
- if (getCanonicalDefinitionID(VDef->Ref) != getCanonicalDefinitionID(*I2)) {
- VDef->Ref = 0; // Mark this variable as undefined
- }
+ if (VDef->CanonicalRef != getCanonicalDefinitionID(*I2))
+ VDef->invalidateRef(); // Mark this variable as undefined
}
}
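
Splitting the old Ref field into DirectRef (preserving the original lookup chain) and a precomputed CanonicalRef turns the loop-invariance comparison above into a single equality check instead of a chain walk per query. A toy model of the resolution:

    #include <cassert>
    #include <vector>

    // Toy model: slot 0 is "undefined"; a reference chains to another slot.
    struct Def {
      unsigned DirectRef = 0;
      unsigned CanonicalRef = 0;
      bool IsRef = false;
    };

    unsigned canonical(const std::vector<Def> &Defs, unsigned ID) {
      // CanonicalRef is precomputed when the reference is added, so this loop
      // terminates after a single hop in practice.
      while (ID > 0 && Defs[ID].IsRef)
        ID = Defs[ID].CanonicalRef;
      return ID;
    }

    int main() {
      std::vector<Def> Defs(4);
      Defs[1] = {0, 0, false}; // a real definition
      Defs[2] = {1, 1, true};  // reference to #1
      Defs[3] = {2, 1, true};  // reference to #2; canonical base is still #1
      assert(canonical(Defs, 3) == 1);
    }
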
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 10b8255..563a753 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -35,8 +35,8 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,
getContext().getLangOpts().ElideConstructors && d.isNRVOVariable();
CIRGenFunction::AutoVarEmission emission(d);
- emission.IsEscapingByRef = d.isEscapingByref();
- if (emission.IsEscapingByRef)
+ emission.isEscapingByRef = d.isEscapingByref();
+ if (emission.isEscapingByRef)
cgm.errorNYI(d.getSourceRange(),
"emitAutoVarDecl: decl escaping by reference");
@@ -78,7 +78,7 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,
alignment);
}
- emission.Addr = address;
+ emission.addr = address;
setAddrOfLocalVar(&d, address);
return emission;
@@ -101,13 +101,13 @@ bool CIRGenFunction::isTrivialInitializer(const Expr *init) {
void CIRGenFunction::emitAutoVarInit(
const CIRGenFunction::AutoVarEmission &emission) {
- assert(emission.Variable && "emission was not valid!");
+ assert(emission.variable && "emission was not valid!");
// If this was emitted as a global constant, we're done.
if (emission.wasEmittedAsGlobal())
return;
- const VarDecl &d = *emission.Variable;
+ const VarDecl &d = *emission.variable;
QualType type = d.getType();
@@ -124,7 +124,7 @@ void CIRGenFunction::emitAutoVarInit(
return;
}
- const Address addr = emission.Addr;
+ const Address addr = emission.addr;
// Check whether this is a byref variable that's potentially
// captured and moved by its own initializer. If so, we'll need to
@@ -153,7 +153,7 @@ void CIRGenFunction::emitAutoVarInit(
}
mlir::Attribute constant;
- if (emission.IsConstantAggregate ||
+ if (emission.isConstantAggregate ||
d.mightBeUsableInConstantExpressions(getContext())) {
// FIXME: Differently from LLVM we try not to emit / lower too much
// here for CIR since we are interested in seeing the ctor in some
@@ -196,7 +196,7 @@ void CIRGenFunction::emitAutoVarInit(
// FIXME(cir): migrate most of this file to use mlir::TypedAttr directly.
auto typedConstant = mlir::dyn_cast<mlir::TypedAttr>(constant);
assert(typedConstant && "expected typed attribute");
- if (!emission.IsConstantAggregate) {
+ if (!emission.isConstantAggregate) {
// For simple scalar/complex initialization, store the value directly.
LValue lv = makeAddrLValue(addr, type);
assert(init && "expected initializer");
@@ -209,7 +209,7 @@ void CIRGenFunction::emitAutoVarInit(
void CIRGenFunction::emitAutoVarCleanups(
const CIRGenFunction::AutoVarEmission &emission) {
- const VarDecl &d = *emission.Variable;
+ const VarDecl &d = *emission.variable;
// Check the type for a cleanup.
if (QualType::DestructionKind dtorKind = d.needsDestruction(getContext()))
@@ -821,7 +821,7 @@ void CIRGenFunction::emitAutoVarTypeCleanup(
// original stack object, not the possibly forwarded object.
Address addr = emission.getObjectAddress(*this);
- const VarDecl *var = emission.Variable;
+ const VarDecl *var = emission.variable;
QualType type = var->getType();
CleanupKind cleanupKind = NormalAndEHCleanup;
@@ -834,7 +834,7 @@ void CIRGenFunction::emitAutoVarTypeCleanup(
case QualType::DK_cxx_destructor:
// If there's an NRVO flag on the emission, we need a different
// cleanup.
- if (emission.NRVOFlag) {
+ if (emission.nrvoFlag) {
cgm.errorNYI(var->getSourceRange(), "emitAutoVarTypeCleanup: NRVO");
return;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index af42d1d..1e987f3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -133,8 +133,7 @@ public:
}
void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); }
void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
- cgf.cgm.errorNYI(ge->getSourceRange(),
- "AggExprEmitter: VisitGenericSelectionExpr");
+ Visit(ge->getResultExpr());
}
void VisitCoawaitExpr(CoawaitExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 83208bf..7989ad2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -210,60 +210,6 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorCall(
return emitCall(fnInfo, callee, returnValue, args, nullptr, loc);
}
-namespace {
-/// The parameters to pass to a usual operator delete.
-struct UsualDeleteParams {
- TypeAwareAllocationMode typeAwareDelete = TypeAwareAllocationMode::No;
- bool destroyingDelete = false;
- bool size = false;
- AlignedAllocationMode alignment = AlignedAllocationMode::No;
-};
-} // namespace
-
-// FIXME(cir): this should be shared with LLVM codegen
-static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *fd) {
- UsualDeleteParams params;
-
- const FunctionProtoType *fpt = fd->getType()->castAs<FunctionProtoType>();
- auto ai = fpt->param_type_begin(), ae = fpt->param_type_end();
-
- if (fd->isTypeAwareOperatorNewOrDelete()) {
- params.typeAwareDelete = TypeAwareAllocationMode::Yes;
- assert(ai != ae);
- ++ai;
- }
-
- // The first argument after the type-identity parameter (if any) is
- // always a void* (or C* for a destroying operator delete for class
- // type C).
- ++ai;
-
- // The next parameter may be a std::destroying_delete_t.
- if (fd->isDestroyingOperatorDelete()) {
- params.destroyingDelete = true;
- assert(ai != ae);
- ++ai;
- }
-
- // Figure out what other parameters we should be implicitly passing.
- if (ai != ae && (*ai)->isIntegerType()) {
- params.size = true;
- ++ai;
- } else {
- assert(!isTypeAwareAllocation(params.typeAwareDelete));
- }
-
- if (ai != ae && (*ai)->isAlignValT()) {
- params.alignment = AlignedAllocationMode::Yes;
- ++ai;
- } else {
- assert(!isTypeAwareAllocation(params.typeAwareDelete));
- }
-
- assert(ai == ae && "unexpected usual deallocation function parameter");
- return params;
-}
-
static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
unsigned minElements,
mlir::Value &numElements,
@@ -616,11 +562,11 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
const auto *deleteFTy = deleteFD->getType()->castAs<FunctionProtoType>();
CallArgList deleteArgs;
- UsualDeleteParams params = getUsualDeleteParams(deleteFD);
+ UsualDeleteParams params = deleteFD->getUsualDeleteParams();
auto paramTypeIt = deleteFTy->param_type_begin();
// Pass std::type_identity tag if present
- if (isTypeAwareAllocation(params.typeAwareDelete))
+ if (isTypeAwareAllocation(params.TypeAwareDelete))
cgm.errorNYI(deleteFD->getSourceRange(),
"emitDeleteCall: type aware delete");
@@ -631,12 +577,12 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
deleteArgs.add(RValue::get(deletePtr), argTy);
// Pass the std::destroying_delete tag if present.
- if (params.destroyingDelete)
+ if (params.DestroyingDelete)
cgm.errorNYI(deleteFD->getSourceRange(),
"emitDeleteCall: destroying delete");
// Pass the size if the delete function has a size_t parameter.
- if (params.size) {
+ if (params.Size) {
QualType sizeType = *paramTypeIt++;
CharUnits deleteTypeSize = getContext().getTypeSizeInChars(deleteTy);
assert(mlir::isa<cir::IntType>(convertType(sizeType)) &&
@@ -648,7 +594,7 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
}
// Pass the alignment if the delete function has an align_val_t parameter.
- if (isAlignedAllocation(params.alignment))
+ if (isAlignedAllocation(params.Alignment))
cgm.errorNYI(deleteFD->getSourceRange(),
"emitDeleteCall: aligned allocation");
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index f4bbced..500007f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -2151,8 +2151,10 @@ mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
}
if (e->getOpcode() == UO_Real) {
- return promotionTy.isNull() ? Visit(op)
- : cgf.emitPromotedScalarExpr(op, promotionTy);
+ mlir::Value operand = promotionTy.isNull()
+ ? Visit(op)
+ : cgf.emitPromotedScalarExpr(op, promotionTy);
+ return builder.createComplexReal(loc, operand);
}
// __imag on a scalar returns zero. Emit the subexpr to ensure side
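
For context on the hunk above: __real__ and __imag__ are GNU extensions that also apply to plain scalars, and this change routes the scalar __real__ case through a complex.real operation as well. A tiny example of the source-level behavior:

    int main() {
      double d = 3.0;
      double re = __real__ d; // 3.0; emitted via a complex.real op per this change
      double im = __imag__ d; // 0.0 for a scalar operand
      return static_cast<int>(re + im) - 3; // 0
    }
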
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index a404c0c..b26b4f2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -836,6 +836,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
return emitCallExprLValue(cast<CallExpr>(e));
case Expr::ParenExprClass:
return emitLValue(cast<ParenExpr>(e)->getSubExpr());
+ case Expr::GenericSelectionExprClass:
+ return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
case Expr::DeclRefExprClass:
return emitDeclRefLValue(cast<DeclRefExpr>(e));
case Expr::CStyleCastExprClass:
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index c0ed8b4..cb7cf98 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -479,55 +479,55 @@ public:
ConstantEmission tryEmitAsConstant(const MemberExpr *me);
struct AutoVarEmission {
- const clang::VarDecl *Variable;
+ const clang::VarDecl *variable;
/// The address of the alloca for languages with explicit address space
/// (e.g. OpenCL) or alloca casted to generic pointer for address space
/// agnostic languages (e.g. C++). Invalid if the variable was emitted
/// as a global constant.
- Address Addr;
+ Address addr;
/// True if the variable is of aggregate type and has a constant
/// initializer.
- bool IsConstantAggregate = false;
+ bool isConstantAggregate = false;
/// True if the variable is a __block variable that is captured by an
/// escaping block.
- bool IsEscapingByRef = false;
+ bool isEscapingByRef = false;
/// True if the variable was emitted as an offload recipe, and thus doesn't
/// have the same sort of alloca initialization.
- bool EmittedAsOffload = false;
+ bool emittedAsOffload = false;
- mlir::Value NRVOFlag{};
+ mlir::Value nrvoFlag{};
struct Invalid {};
- AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {}
+ AutoVarEmission(Invalid) : variable(nullptr), addr(Address::invalid()) {}
AutoVarEmission(const clang::VarDecl &variable)
- : Variable(&variable), Addr(Address::invalid()) {}
+ : variable(&variable), addr(Address::invalid()) {}
static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
- bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
+ bool wasEmittedAsGlobal() const { return !addr.isValid(); }
- bool wasEmittedAsOffloadClause() const { return EmittedAsOffload; }
+ bool wasEmittedAsOffloadClause() const { return emittedAsOffload; }
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself. It is casted to default
/// address space for address space agnostic languages.
- Address getAllocatedAddress() const { return Addr; }
+ Address getAllocatedAddress() const { return addr; }
// Changes the stored address for the emission. This function should only
// be used in extreme cases, and isn't required to model normal AST
// initialization/variables.
- void setAllocatedAddress(Address A) { Addr = A; }
+ void setAllocatedAddress(Address a) { addr = a; }
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
Address getObjectAddress(CIRGenFunction &cgf) const {
- if (!IsEscapingByRef)
- return Addr;
+ if (!isEscapingByRef)
+ return addr;
assert(!cir::MissingFeatures::opAllocaEscapeByReference());
return Address::invalid();
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
index 7f9350a..a9af753 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp
@@ -62,7 +62,7 @@ mlir::Value CIRGenFunction::createOpenACCConstantInt(mlir::Location loc,
auto constOp = builder.create<mlir::arith::ConstantOp>(
loc, builder.getIntegerAttr(ty, value));
- return constOp.getResult();
+ return constOp;
}
CIRGenFunction::OpenACCDataOperandInfo
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index 3cf0534..3d86f71 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -110,7 +110,7 @@ class OpenACCClauseCIREmitter final
auto constOp = builder.create<mlir::arith::ConstantOp>(
loc, builder.getIntegerAttr(ty, value));
- return constOp.getResult();
+ return constOp;
}
mlir::Value createConstantInt(SourceLocation loc, unsigned width,
@@ -230,13 +230,13 @@ class OpenACCClauseCIREmitter final
std::is_same_v<AfterOpTy, mlir::acc::DetachOp>) {
// Detach/Delete ops don't have the variable reference here, so their
// build function takes one fewer argument.
- afterOp = builder.create<AfterOpTy>(
- opInfo.beginLoc, beforeOp.getResult(), structured, implicit,
- opInfo.name, opInfo.bounds);
+ afterOp =
+ builder.create<AfterOpTy>(opInfo.beginLoc, beforeOp, structured,
+ implicit, opInfo.name, opInfo.bounds);
} else {
afterOp = builder.create<AfterOpTy>(
- opInfo.beginLoc, beforeOp.getResult(), opInfo.varValue, structured,
- implicit, opInfo.name, opInfo.bounds);
+ opInfo.beginLoc, beforeOp, opInfo.varValue, structured, implicit,
+ opInfo.name, opInfo.bounds);
}
}
@@ -1001,11 +1001,11 @@ public:
OpenACCRecipeBuilder<mlir::acc::PrivateRecipeOp>(cgf, builder)
.getOrCreateRecipe(
cgf.getContext(), recipeInsertLocation, varExpr,
- varRecipe.AllocaDecl, varRecipe.InitExpr,
+ varRecipe.AllocaDecl,
/*temporary=*/nullptr, OpenACCReductionOperator::Invalid,
Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
- privateOp.getResult());
+ privateOp);
// TODO: OpenACC: The dialect is going to change in the near future to
// have these be on a different operation, so when that changes, we
// probably need to change these here.
@@ -1036,24 +1036,17 @@ public:
{
mlir::OpBuilder::InsertionGuard guardCase(builder);
- // TODO: OpenACC: At the moment this is a bit of a hacky way of doing
- // this, and won't work when we get to bounds/etc. Do this for now to
- // limit the scope of this refactor.
- VarDecl *allocaDecl = varRecipe.AllocaDecl;
- allocaDecl->setInit(varRecipe.InitExpr);
- allocaDecl->setInitStyle(VarDecl::CallInit);
auto recipe =
OpenACCRecipeBuilder<mlir::acc::FirstprivateRecipeOp>(cgf,
builder)
.getOrCreateRecipe(
cgf.getContext(), recipeInsertLocation, varExpr,
- varRecipe.AllocaDecl, varRecipe.InitExpr,
- varRecipe.InitFromTemporary,
+ varRecipe.AllocaDecl, varRecipe.InitFromTemporary,
OpenACCReductionOperator::Invalid,
Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
- firstPrivateOp.getResult());
+ firstPrivateOp);
// TODO: OpenACC: The dialect is going to change in the near future to
// have these be on a different operation, so when that changes, we
@@ -1086,22 +1079,16 @@ public:
{
mlir::OpBuilder::InsertionGuard guardCase(builder);
- // TODO: OpenACC: At the moment this is a bit of a hacky way of doing
- // this, and won't work when we get to bounds/etc. Do this for now to
- // limit the scope of this refactor.
- VarDecl *allocaDecl = varRecipe.AllocaDecl;
- allocaDecl->setInit(varRecipe.InitExpr);
- allocaDecl->setInitStyle(VarDecl::CallInit);
auto recipe =
OpenACCRecipeBuilder<mlir::acc::ReductionRecipeOp>(cgf, builder)
.getOrCreateRecipe(
cgf.getContext(), recipeInsertLocation, varExpr,
- varRecipe.AllocaDecl, varRecipe.InitExpr,
+ varRecipe.AllocaDecl,
/*temporary=*/nullptr, clause.getReductionOp(),
Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
- reductionOp.getResult());
+ reductionOp);
operation.addReduction(builder.getContext(), reductionOp, recipe);
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index fc28ac5..ea6ea2c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -36,6 +36,75 @@ mlir::Block *OpenACCRecipeBuilderBase::createRecipeBlock(mlir::Region &region,
llvm::SmallVector<mlir::Location> locs{types.size(), loc};
return builder.createBlock(&region, region.end(), types, locs);
}
+void OpenACCRecipeBuilderBase::makeAllocaCopy(mlir::Location loc,
+ mlir::Type copyType,
+ mlir::Value numEltsToCopy,
+ mlir::Value offsetPerSubarray,
+ mlir::Value destAlloca,
+ mlir::Value srcAlloca) {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+
+ mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy);
+ auto itrPtrTy = cir::PointerType::get(itrTy);
+ mlir::IntegerAttr itrAlign =
+ cgf.cgm.getSize(cgf.getContext().getTypeAlignInChars(
+ cgf.getContext().UnsignedLongLongTy));
+
+ auto loopBuilder = [&]() {
+ auto itr =
+ cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr", itrAlign);
+ cir::ConstantOp constZero = builder.getConstInt(loc, itrTy, 0);
+ builder.CIRBaseBuilderTy::createStore(loc, constZero, itr);
+ builder.createFor(
+ loc,
+ /*condBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ // itr < numEltsToCopy
+              // Enforce a trip count of 1 if there wasn't any element count;
+              // this way we can just use this loop with constant bounds
+              // instead of a separate code path.
+ if (!numEltsToCopy)
+ numEltsToCopy = builder.getConstInt(loc, itrTy, 1);
+
+ auto loadCur = cir::LoadOp::create(builder, loc, {itr});
+ auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadCur,
+ numEltsToCopy);
+ builder.createCondition(cmp);
+ },
+ /*bodyBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+          // destAlloca[itr] = srcAlloca[offsetPerSubarray * itr];
+ auto loadCur = cir::LoadOp::create(builder, loc, {itr});
+ auto srcOffset = builder.createMul(loc, offsetPerSubarray, loadCur);
+
+ auto ptrToOffsetIntoSrc = cir::PtrStrideOp::create(
+ builder, loc, copyType, srcAlloca, srcOffset);
+
+ auto offsetIntoDecayDest = cir::PtrStrideOp::create(
+ builder, loc, builder.getPointerTo(copyType), destAlloca,
+ loadCur);
+
+ builder.CIRBaseBuilderTy::createStore(loc, ptrToOffsetIntoSrc,
+ offsetIntoDecayDest);
+ builder.createYield(loc);
+ },
+ /*stepBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ // Simple increment of the iterator.
+ auto load = cir::LoadOp::create(builder, loc, {itr});
+ auto inc = cir::UnaryOp::create(builder, loc, load.getType(),
+ cir::UnaryOpKind::Inc, load);
+ builder.CIRBaseBuilderTy::createStore(loc, inc, itr);
+ builder.createYield(loc);
+ });
+ };
+
+ cir::ScopeOp::create(builder, loc,
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ loopBuilder();
+ builder.createYield(loc);
+ });
+}
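
In plain C++ terms, the loop that makeAllocaCopy builds behaves roughly like the following; this is an approximation of the emitted CIR, not code from the patch:

    // Store, into each slot of the destination pointer array, the address of
    // the start of the corresponding sub-array inside the source allocation.
    void allocaCopy(void **dest, char *src, unsigned long long numElts,
                    unsigned long long eltStride) {
      if (numElts == 0)
        numElts = 1; // trip count of 1 when no element count exists
      for (unsigned long long itr = 0; itr < numElts; ++itr)
        dest[itr] = src + eltStride * itr;
    }
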
mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca(
mlir::Block *block, SourceRange exprRange, mlir::Location loc,
@@ -78,6 +147,10 @@ mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca(
bool lastBoundWasArray = isArrayTy(boundTypes.back());
+  // Make sure we track a moving version of this alloca so that each level's
+  // copy refers back to the correct previous allocation.
+ mlir::Value lastAlloca = initialAlloca;
+
// Since we're iterating the types in reverse, this sets up each index
// corresponding to the boundsRange to be the type 'after application of
// the bounds'.
@@ -125,14 +198,21 @@ mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca(
mlir::Type eltTy = cgf.convertType(resultType);
cir::PointerType ptrTy = builder.getPointerTo(eltTy);
- builder.createAlloca(loc, ptrTy, eltTy, "openacc.init.bounds",
- cgf.getContext().getTypeAlignInChars(resultType),
- curSize);
-
- // TODO: OpenACC : At this point we should be copying the addresses of
- // each element of this to the last allocation. At the moment, that is
- // not yet implemented.
- cgf.cgm.errorNYI(exprRange, "OpenACC recipe alloca copying");
+ mlir::Value curAlloca = builder.createAlloca(
+ loc, ptrTy, eltTy, "openacc.init.bounds",
+ cgf.getContext().getTypeAlignInChars(resultType), curSize);
+
+ makeAllocaCopy(loc, ptrTy, cumulativeElts, eltsPerSubArray, lastAlloca,
+ curAlloca);
+ lastAlloca = curAlloca;
+ } else {
+    // In the case of an array, we only need to decay the pointer, so do a
+    // zero-offset stride on the last alloca to decay it down one array
+    // level.
+ cir::ConstantOp constZero = builder.getConstInt(loc, itrTy, 0);
+ lastAlloca = builder.getArrayElement(loc, loc, lastAlloca,
+ cgf.convertType(resultType),
+ constZero, /*shouldDecay=*/true);
}
cumulativeElts = eltsToAlloca;
@@ -160,7 +240,7 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
if (auto arrayTy = dyn_cast<cir::ArrayType>(eltTy))
return builder.getArrayElement(loc, loc, subVal, arrayTy.getElementType(),
- idxLoad.getResult(),
+ idxLoad,
/*shouldDecay=*/true);
assert(isa<cir::PointerType>(eltTy));
@@ -168,8 +248,8 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
auto eltLoad = cir::LoadOp::create(builder, loc, {subVal});
return cir::PtrStrideOp::create(builder, loc, eltLoad.getType(), eltLoad,
- idxLoad.getResult())
- .getResult();
+ idxLoad);
+
};
auto forStmtBuilder = [&]() {
@@ -191,12 +271,11 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
if (inverse) {
cir::ConstantOp constOne = builder.getConstInt(loc, itrTy, 1);
- auto sub =
- cir::BinOp::create(builder, loc, itrTy, cir::BinOpKind::Sub,
- ubConversion.getResult(0), constOne.getResult());
+ auto sub = cir::BinOp::create(builder, loc, itrTy, cir::BinOpKind::Sub,
+ ubConversion.getResult(0), constOne);
// Upperbound is exclusive, so subtract 1.
- builder.CIRBaseBuilderTy::createStore(loc, sub.getResult(), itr);
+ builder.CIRBaseBuilderTy::createStore(loc, sub, itr);
} else {
// Lowerbound is inclusive, so we can include it.
builder.CIRBaseBuilderTy::createStore(loc, lbConversion.getResult(0),
@@ -214,8 +293,8 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
auto loadCur = cir::LoadOp::create(builder, loc, {itr});
// Use 'not equal' since we are just doing an increment/decrement.
auto cmp = builder.createCompare(
- loc, inverse ? cir::CmpOpKind::ge : cir::CmpOpKind::lt,
- loadCur.getResult(), endItr.getResult(0));
+ loc, inverse ? cir::CmpOpKind::ge : cir::CmpOpKind::lt, loadCur,
+ endItr.getResult(0));
builder.createCondition(cmp);
},
/*bodyBuilder=*/
@@ -229,11 +308,10 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
/*stepBuilder=*/
[&](mlir::OpBuilder &b, mlir::Location loc) {
auto load = cir::LoadOp::create(builder, loc, {itr});
- auto unary = cir::UnaryOp::create(builder, loc, load.getType(),
- inverse ? cir::UnaryOpKind::Dec
- : cir::UnaryOpKind::Inc,
- load.getResult());
- builder.CIRBaseBuilderTy::createStore(loc, unary.getResult(), itr);
+ auto unary = cir::UnaryOp::create(
+ builder, loc, load.getType(),
+ inverse ? cir::UnaryOpKind::Dec : cir::UnaryOpKind::Inc, load);
+ builder.CIRBaseBuilderTy::createStore(loc, unary, itr);
builder.createYield(loc);
});
};
@@ -322,6 +400,32 @@ void OpenACCRecipeBuilderBase::createRecipeDestroySection(
mlir::acc::YieldOp::create(builder, locEnd);
}
+void OpenACCRecipeBuilderBase::makeBoundsInit(
+ mlir::Value alloca, mlir::Location loc, mlir::Block *block,
+ const VarDecl *allocaDecl, QualType origType, bool isInitSection) {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+ builder.setInsertionPointToEnd(block);
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ CIRGenFunction::AutoVarEmission tempDeclEmission{*allocaDecl};
+ tempDeclEmission.emittedAsOffload = true;
+
+  // The init section is the only one of these sections that takes a single
+  // argument for the 'type', so we have to drop 1 for init; future calls to
+  // this will need to drop 2.
+ llvm::MutableArrayRef<mlir::BlockArgument> boundsRange =
+ block->getArguments().drop_front(isInitSection ? 1 : 2);
+
+ mlir::Value subscriptedValue = alloca;
+ for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
+ subscriptedValue = createBoundsLoop(subscriptedValue, boundArg, loc,
+ /*inverse=*/false);
+
+ tempDeclEmission.setAllocatedAddress(
+ Address{subscriptedValue, cgf.convertType(origType),
+ cgf.getContext().getDeclAlign(allocaDecl)});
+ cgf.emitAutoVarInit(tempDeclEmission);
+}
// TODO: OpenACC: When we get this implemented for the reduction/firstprivate,
// this might end up re-merging with createRecipeInitCopy. For now, keep it
@@ -331,7 +435,7 @@ void OpenACCRecipeBuilderBase::createPrivateInitRecipe(
mlir::Location loc, mlir::Location locEnd, SourceRange exprRange,
mlir::Value mainOp, mlir::acc::PrivateRecipeOp recipe, size_t numBounds,
llvm::ArrayRef<QualType> boundTypes, const VarDecl *allocaDecl,
- QualType origType, const Expr *initExpr) {
+ QualType origType) {
assert(allocaDecl && "Required recipe variable not set?");
CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, allocaDecl};
@@ -364,11 +468,17 @@ void OpenACCRecipeBuilderBase::createPrivateInitRecipe(
cgf.emitAutoVarAlloca(*allocaDecl, builder.saveInsertionPoint());
cgf.emitAutoVarInit(tempDeclEmission);
} else {
- makeBoundsAlloca(block, exprRange, loc, "openacc.private.init", numBounds,
- boundTypes);
-
- if (initExpr)
- cgf.cgm.errorNYI(exprRange, "private-init with bounds initialization");
+ mlir::Value alloca = makeBoundsAlloca(
+ block, exprRange, loc, "openacc.private.init", numBounds, boundTypes);
+
+ // If the initializer is trivial, there is nothing to do here, so save
+ // ourselves some effort.
+ if (allocaDecl->getInit() &&
+ (!cgf.isTrivialInitializer(allocaDecl->getInit()) ||
+ cgf.getContext().getLangOpts().getTrivialAutoVarInit() !=
+ LangOptions::TrivialAutoVarInitKind::Uninitialized))
+ makeBoundsInit(alloca, loc, block, allocaDecl, origType,
+ /*isInitSection=*/true);
}
mlir::acc::YieldOp::create(builder, locEnd);
@@ -395,7 +505,7 @@ void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
// that instead of the variable in the other block.
tempDeclEmission.setAllocatedAddress(
Address{toArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)});
- tempDeclEmission.EmittedAsOffload = true;
+ tempDeclEmission.emittedAsOffload = true;
CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary};
cgf.setAddrOfLocalVar(
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
index acd187b..a05b0bd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
@@ -24,6 +24,10 @@
namespace clang::CIRGen {
class OpenACCRecipeBuilderBase {
+  // Copies the addresses of an alloca's elements into the previous
+  // allocation.
+ void makeAllocaCopy(mlir::Location loc, mlir::Type copyType,
+ mlir::Value numEltsToCopy, mlir::Value offsetPerSubarray,
+ mlir::Value destAlloca, mlir::Value srcAlloca);
// This function generates the required alloca, similar to
// 'emitAutoVarAlloca', except for the OpenACC array/pointer types.
mlir::Value makeBoundsAlloca(mlir::Block *block, SourceRange exprRange,
@@ -31,6 +35,10 @@ class OpenACCRecipeBuilderBase {
size_t numBounds,
llvm::ArrayRef<QualType> boundTypes);
+ void makeBoundsInit(mlir::Value alloca, mlir::Location loc,
+ mlir::Block *block, const VarDecl *allocaDecl,
+ QualType origType, bool isInitSection);
+
protected:
CIRGen::CIRGenFunction &cgf;
CIRGen::CIRGenBuilderTy &builder;
@@ -62,8 +70,7 @@ protected:
mlir::acc::PrivateRecipeOp recipe,
size_t numBounds,
llvm::ArrayRef<QualType> boundTypes,
- const VarDecl *allocaDecl, QualType origType,
- const Expr *initExpr);
+ const VarDecl *allocaDecl, QualType origType);
void createRecipeDestroySection(mlir::Location loc, mlir::Location locEnd,
mlir::Value mainOp, CharUnits alignment,
@@ -204,15 +211,12 @@ public:
OpenACCRecipeBuilder(CIRGen::CIRGenFunction &cgf,
CIRGen::CIRGenBuilderTy &builder)
: OpenACCRecipeBuilderBase(cgf, builder) {}
- RecipeTy getOrCreateRecipe(ASTContext &astCtx,
- mlir::OpBuilder::InsertPoint &insertLocation,
- const Expr *varRef, const VarDecl *varRecipe,
- const Expr *initExpr, const VarDecl *temporary,
- OpenACCReductionOperator reductionOp,
- DeclContext *dc, QualType origType,
- size_t numBounds,
- llvm::ArrayRef<QualType> boundTypes,
- QualType baseType, mlir::Value mainOp) {
+ RecipeTy getOrCreateRecipe(
+ ASTContext &astCtx, mlir::OpBuilder::InsertPoint &insertLocation,
+ const Expr *varRef, const VarDecl *varRecipe, const VarDecl *temporary,
+ OpenACCReductionOperator reductionOp, DeclContext *dc, QualType origType,
+ size_t numBounds, llvm::ArrayRef<QualType> boundTypes, QualType baseType,
+ mlir::Value mainOp) {
assert(!varRecipe->getType()->isSpecificBuiltinType(
BuiltinType::ArraySection) &&
"array section shouldn't make it to recipe creation");
@@ -258,7 +262,7 @@ public:
if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
createPrivateInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
recipe, numBounds, boundTypes, varRecipe,
- origType, initExpr);
+ origType);
} else {
createRecipeInitCopy(loc, locEnd, varRef->getSourceRange(), mainOp,
recipe, varRecipe, temporary);
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
index 914ef16..bf0ddc5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
@@ -57,7 +57,7 @@ namespace clang::CIRGen {
/// cir.func @store_field() {
/// %0 = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["s"] {alignment = 4 : i64}
/// %1 = cir.const #cir.int<2> : !s32i
-/// %2 = cir.cast(integral, %1 : !s32i), !u32i
+/// %2 = cir.cast integral %1 : !s32i -> !u32i
/// %3 = cir.get_member %0[3] {name = "more_bits"} : !cir.ptr<!rec_S> ->
/// !cir.ptr<!u16i>
/// %4 = cir.set_bitfield(#bfi_more_bits, %3 :
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index a762881..87f2340 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -296,9 +296,8 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) {
}
llvm::stable_sort(members);
- // TODO: implement clipTailPadding once bitfields are implemented
- assert(!cir::MissingFeatures::bitfields());
- assert(!cir::MissingFeatures::recordZeroInit());
+ // TODO: Verify bitfield clipping
+ assert(!cir::MissingFeatures::checkBitfieldClipping());
members.push_back(makeStorageInfo(size, getUIntNType(8)));
determinePacked(nonVirtualBaseType);
@@ -319,9 +318,11 @@ void CIRRecordLowering::fillOutputFields() {
fieldIdxMap[member.fieldDecl->getCanonicalDecl()] =
fieldTypes.size() - 1;
// A field without storage must be a bitfield.
- assert(!cir::MissingFeatures::bitfields());
- if (!member.data)
+ if (!member.data) {
+ assert(member.fieldDecl &&
+ "member.data is a nullptr so member.fieldDecl should not be");
setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
+ }
} else if (member.kind == MemberInfo::InfoKind::Base) {
nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
} else if (member.kind == MemberInfo::InfoKind::VBase) {
@@ -615,7 +616,7 @@ void CIRRecordLowering::determinePacked(bool nvBaseType) {
continue;
// If any member falls at an offset that is not a multiple of its alignment,
// then the entire record must be packed.
- if (member.offset % getAlignment(member.data))
+ if (!member.offset.isMultipleOf(getAlignment(member.data)))
packed = true;
if (member.offset < nvSize)
nvAlignment = std::max(nvAlignment, getAlignment(member.data));
@@ -623,12 +624,12 @@ void CIRRecordLowering::determinePacked(bool nvBaseType) {
}
// If the size of the record (the capstone's offset) is not a multiple of the
// record's alignment, it must be packed.
- if (members.back().offset % alignment)
+ if (!members.back().offset.isMultipleOf(alignment))
packed = true;
// If the size of the non-virtual sub-object is not a multiple of the
// non-virtual sub-object's alignment, it must be packed. We cannot have a
// packed non-virtual sub-object and an unpacked complete object or vice
// versa.
- if (nvSize % nvAlignment)
+ if (!nvSize.isMultipleOf(nvAlignment))
packed = true;
// Update the alignment of the sentinel.
if (!packed)
@@ -697,13 +698,9 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
ty ? *ty : cir::RecordType{}, baseTy ? baseTy : cir::RecordType{},
(bool)lowering.zeroInitializable, (bool)lowering.zeroInitializableAsBase);
- assert(!cir::MissingFeatures::recordZeroInit());
-
rl->nonVirtualBases.swap(lowering.nonVirtualBases);
rl->completeObjectVirtualBases.swap(lowering.virtualBases);
- assert(!cir::MissingFeatures::bitfields());
-
// Add all the field numbers.
rl->fieldIdxMap.swap(lowering.fieldIdxMap);
@@ -824,7 +821,7 @@ void CIRRecordLowering::lowerUnion() {
appendPaddingBytes(layoutSize - getSize(storageType));
// Set packed if we need it.
- if (layoutSize % getAlignment(storageType))
+ if (!layoutSize.isMultipleOf(getAlignment(storageType)))
packed = true;
}
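
Note: the hunks above (and the CGAtomic, CGExprConstant, CGObjCMac, and CGRecordLayoutBuilder hunks further down) rewrite `offset % alignment` tests as CharUnits::isMultipleOf. A minimal standalone sketch of the intended equivalence (CharUnitsSketch is a stand-in for illustration, not clang's real CharUnits):

    // Sketch only: models a.isMultipleOf(b) as (a % b) == 0, the same truth
    // value the replaced modulo tests relied on.
    #include <cassert>
    #include <cstdint>

    struct CharUnitsSketch {
      int64_t Quantity = 0;
      bool isMultipleOf(CharUnitsSketch N) const {
        return Quantity % N.Quantity == 0;
      }
    };

    int main() {
      CharUnitsSketch Offset{12}, Align{4};
      assert(Offset.isMultipleOf(Align));               // old form: !(12 % 4)
      assert(!CharUnitsSketch{10}.isMultipleOf(Align)); // old form: 10 % 4 != 0
      return 0;
    }

So `if (x % a)` becomes `if (!x.isMultipleOf(a))`: the negation flips because the helper answers "is it a multiple" rather than "what is the remainder".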
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index fb87036..6b5cc80 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2388,14 +2388,23 @@ OpFoldResult cir::ComplexCreateOp::fold(FoldAdaptor adaptor) {
//===----------------------------------------------------------------------===//
LogicalResult cir::ComplexRealOp::verify() {
- if (getType() != getOperand().getType().getElementType()) {
+ mlir::Type operandTy = getOperand().getType();
+ if (auto complexOperandTy = mlir::dyn_cast<cir::ComplexType>(operandTy)) {
+ operandTy = complexOperandTy.getElementType();
+ }
+
+ if (getType() != operandTy) {
emitOpError() << ": result type does not match operand type";
return failure();
}
+
return success();
}
OpFoldResult cir::ComplexRealOp::fold(FoldAdaptor adaptor) {
+ if (!mlir::isa<cir::ComplexType>(getOperand().getType()))
+ return nullptr;
+
if (auto complexCreateOp = getOperand().getDefiningOp<cir::ComplexCreateOp>())
return complexCreateOp.getOperand(0);
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 0f309e4..4bc7783 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2425,7 +2425,7 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
// For instance, this CIR code:
//
// cir.func @foo(%arg0: !s32i) -> !s32i {
-// %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+// %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
// cir.if %4 {
// %5 = cir.const #cir.int<1> : !s32i
// cir.return %5 : !s32i
@@ -2999,8 +2999,13 @@ mlir::LogicalResult CIRToLLVMComplexRealOpLowering::matchAndRewrite(
cir::ComplexRealOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
mlir::Type resultLLVMTy = getTypeConverter()->convertType(op.getType());
- rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(
- op, resultLLVMTy, adaptor.getOperand(), llvm::ArrayRef<std::int64_t>{0});
+ mlir::Value operand = adaptor.getOperand();
+ if (mlir::isa<cir::ComplexType>(op.getOperand().getType())) {
+ operand = mlir::LLVM::ExtractValueOp::create(
+ rewriter, op.getLoc(), resultLLVMTy, operand,
+ llvm::ArrayRef<std::int64_t>{0});
+ }
+ rewriter.replaceOp(op, operand);
return mlir::success();
}
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index eeb0fd6..4a3446a 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -880,7 +880,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
CharUnits MaxInlineWidth =
getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
DiagnosticsEngine &Diags = CGM.getDiags();
- bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
+ bool Misaligned = !Ptr.getAlignment().isMultipleOf(TInfo.Width);
bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
if (Misaligned) {
Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index a092b71..c52526c 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -1377,58 +1377,6 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
}
namespace {
-/// The parameters to pass to a usual operator delete.
-struct UsualDeleteParams {
- TypeAwareAllocationMode TypeAwareDelete = TypeAwareAllocationMode::No;
- bool DestroyingDelete = false;
- bool Size = false;
- AlignedAllocationMode Alignment = AlignedAllocationMode::No;
-};
-}
-
-static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
- UsualDeleteParams Params;
-
- const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
- auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
-
- if (FD->isTypeAwareOperatorNewOrDelete()) {
- Params.TypeAwareDelete = TypeAwareAllocationMode::Yes;
- assert(AI != AE);
- ++AI;
- }
-
- // The first argument after the type-identity parameter (if any) is
- // always a void* (or C* for a destroying operator delete for class
- // type C).
- ++AI;
-
- // The next parameter may be a std::destroying_delete_t.
- if (FD->isDestroyingOperatorDelete()) {
- assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
- Params.DestroyingDelete = true;
- assert(AI != AE);
- ++AI;
- }
-
- // Figure out what other parameters we should be implicitly passing.
- if (AI != AE && (*AI)->isIntegerType()) {
- Params.Size = true;
- ++AI;
- } else
- assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
-
- if (AI != AE && (*AI)->isAlignValT()) {
- Params.Alignment = AlignedAllocationMode::Yes;
- ++AI;
- } else
- assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
-
- assert(AI == AE && "unexpected usual deallocation function parameter");
- return Params;
-}
-
-namespace {
/// A cleanup to call the given 'operator delete' function upon abnormal
/// exit from a new expression. Templated on a traits type that deals with
/// ensuring that the arguments dominate the cleanup if necessary.
@@ -1505,7 +1453,7 @@ namespace {
} else {
// For a non-placement new-expression, 'operator delete' can take a
// size and/or an alignment if it has the right parameters.
- Params = getUsualDeleteParams(OperatorDelete);
+ Params = OperatorDelete->getUsualDeleteParams();
}
assert(!Params.DestroyingDelete &&
@@ -1838,7 +1786,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
CallArgList DeleteArgs;
- auto Params = getUsualDeleteParams(DeleteFD);
+ auto Params = DeleteFD->getUsualDeleteParams();
auto ParamTypeIt = DeleteFTy->param_type_begin();
std::optional<llvm::AllocaInst *> TagAlloca;
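
The getUsualDeleteParams logic deleted above is not gone; per the call-site changes in this hunk it now lives on FunctionDecl. A hedged caller-side sketch, assuming the member returns a struct with the same fields as the removed local UsualDeleteParams (the function name here is hypothetical):

    // Hypothetical consumer of FunctionDecl::getUsualDeleteParams().
    #include "clang/AST/Decl.h"

    static void noteImplicitDeleteArgs(const clang::FunctionDecl *DeleteFD) {
      auto Params = DeleteFD->getUsualDeleteParams();
      // Per the removed helper, the implicit trailing arguments are, in
      // order: a type-identity tag (if TypeAwareDelete), the allocation
      // size (if Size), and std::align_val_t (if Alignment is Yes).
      (void)Params;
    }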
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index b44dd9e..6407afc 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -433,7 +433,7 @@ llvm::Constant *ConstantAggregateBuilder::buildFrom(
// All remaining elements must be the same type.
if (Elems[I]->getType() != CommonType ||
- Offset(I) % ElemSize != 0) {
+ !Offset(I).isMultipleOf(ElemSize)) {
CanEmitArray = false;
break;
}
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 60f30a1..dbcce9b 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -5367,7 +5367,7 @@ IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
// Ignore scan requests that don't start at an even multiple of the
// word size. We can't encode them.
- if ((beginOfScan % WordSize) != 0)
+ if (!beginOfScan.isMultipleOf(WordSize))
continue;
// Ignore scan requests that start before the instance start.
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 75bde3f..8cda583 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1542,7 +1542,7 @@ static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc(
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(BeginLoc);
- if (CGM.getFileSystem()->exists(PLoc.getFilename()))
+ if (!CGM.getFileSystem()->exists(PLoc.getFilename()))
PLoc = SM.getPresumedLoc(BeginLoc, /*UseLineDirectives=*/false);
return std::pair<std::string, uint64_t>(PLoc.getFilename(), PLoc.getLine());
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 5f6136c..e9205c6 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -369,11 +369,11 @@ void CGRecordLowering::lowerUnion(bool isNonVirtualBaseType) {
appendPaddingBytes(LayoutSize - getSize(StorageType));
// Set packed if we need it.
const auto StorageAlignment = getAlignment(StorageType);
- assert((Layout.getSize() % StorageAlignment == 0 ||
- Layout.getDataSize() % StorageAlignment) &&
+ assert((Layout.getSize().isMultipleOf(StorageAlignment) ||
+ !Layout.getDataSize().isMultipleOf(StorageAlignment)) &&
"Union's standard layout and no_unique_address layout must agree on "
"packedness");
- if (Layout.getDataSize() % StorageAlignment)
+ if (!Layout.getDataSize().isMultipleOf(StorageAlignment))
Packed = true;
}
@@ -977,7 +977,7 @@ void CGRecordLowering::determinePacked(bool NVBaseType) {
continue;
    // If any member falls at an offset that is not a multiple of its alignment,
// then the entire record must be packed.
- if (Member.Offset % getAlignment(Member.Data))
+ if (!Member.Offset.isMultipleOf(getAlignment(Member.Data)))
Packed = true;
if (Member.Offset < NVSize)
NVAlignment = std::max(NVAlignment, getAlignment(Member.Data));
@@ -985,12 +985,12 @@ void CGRecordLowering::determinePacked(bool NVBaseType) {
}
// If the size of the record (the capstone's offset) is not a multiple of the
// record's alignment, it must be packed.
- if (Members.back().Offset % Alignment)
+ if (!Members.back().Offset.isMultipleOf(Alignment))
Packed = true;
// If the non-virtual sub-object is not a multiple of the non-virtual
// sub-object's alignment, it must be packed. We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
- if (NVSize % NVAlignment)
+ if (!NVSize.isMultipleOf(NVAlignment))
Packed = true;
// Update the alignment of the sentinel.
if (!Packed)
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 2e3fc53..4aa6314 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -486,6 +486,12 @@ llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
}
+ if (ResAttrs.IsCounter) {
+ llvm::Type *ElemType = llvm::Type::getInt32Ty(Ctx);
+ uint32_t StorageClass = /* StorageBuffer storage class */ 12;
+ return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {ElemType},
+ {StorageClass, true});
+ }
llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy);
llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
uint32_t StorageClass = /* StorageBuffer storage class */ 12;
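
For counter resources, the new IsCounter branch above emits a storage-buffer handle with an i32 element instead of a runtime array of the contained type. A hedged standalone sketch that builds the same target extension type and prints it, assuming llvm::TargetExtType's usual textual form:

    // Sketch: reproduce the counter-handle type from the branch above.
    #include <cstdint>
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Type *ElemType = llvm::Type::getInt32Ty(Ctx);
      uint32_t StorageClass = 12; // StorageBuffer, as in the branch above
      llvm::Type *T = llvm::TargetExtType::get(
          Ctx, "spirv.VulkanBuffer", {ElemType}, {StorageClass, /*writable*/ 1});
      T->print(llvm::outs()); // expected: target("spirv.VulkanBuffer", i32, 12, 1)
      llvm::outs() << "\n";
      return 0;
    }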
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index c03ba94..fb78948 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -1343,9 +1343,10 @@ class X86_64ABIInfo : public ABIInfo {
}
bool returnCXXRecordGreaterThan128InMem() const {
- // Clang <= 20.0 did not do this.
+ // Clang <= 20.0 did not do this, and PlayStation does not do this.
if (getContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver20)
+ LangOptions::ClangABI::Ver20 ||
+ getTarget().getTriple().isPS())
return false;
return true;
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index adaa6b3..412a176 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -2723,42 +2723,6 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
}
}
-static void EmitComplexRangeDiag(const Driver &D, StringRef LastOpt,
- LangOptions::ComplexRangeKind Range,
- StringRef NewOpt,
- LangOptions::ComplexRangeKind NewRange) {
- // Do not emit a warning if NewOpt overrides LastOpt in the following cases.
- //
- // | LastOpt | NewOpt |
- // |-----------------------|-----------------------|
- // | -fcx-limited-range | -fno-cx-limited-range |
- // | -fno-cx-limited-range | -fcx-limited-range |
- // | -fcx-fortran-rules | -fno-cx-fortran-rules |
- // | -fno-cx-fortran-rules | -fcx-fortran-rules |
- // | -ffast-math | -fno-fast-math |
- // | -ffp-model= | -ffast-math |
- // | -ffp-model= | -fno-fast-math |
- // | -ffp-model= | -ffp-model= |
- // | -fcomplex-arithmetic= | -fcomplex-arithmetic= |
- if (LastOpt == NewOpt || NewOpt.empty() || LastOpt.empty() ||
- (LastOpt == "-fcx-limited-range" && NewOpt == "-fno-cx-limited-range") ||
- (LastOpt == "-fno-cx-limited-range" && NewOpt == "-fcx-limited-range") ||
- (LastOpt == "-fcx-fortran-rules" && NewOpt == "-fno-cx-fortran-rules") ||
- (LastOpt == "-fno-cx-fortran-rules" && NewOpt == "-fcx-fortran-rules") ||
- (LastOpt == "-ffast-math" && NewOpt == "-fno-fast-math") ||
- (LastOpt.starts_with("-ffp-model=") && NewOpt == "-ffast-math") ||
- (LastOpt.starts_with("-ffp-model=") && NewOpt == "-fno-fast-math") ||
- (LastOpt.starts_with("-ffp-model=") &&
- NewOpt.starts_with("-ffp-model=")) ||
- (LastOpt.starts_with("-fcomplex-arithmetic=") &&
- NewOpt.starts_with("-fcomplex-arithmetic=")))
- return;
-
- D.Diag(clang::diag::warn_drv_overriding_complex_range)
- << LastOpt << NewOpt << complexRangeKindToStr(Range)
- << complexRangeKindToStr(NewRange);
-}
-
static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
bool OFastEnabled, const ArgList &Args,
ArgStringList &CmdArgs,
@@ -2815,27 +2779,19 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
std::string ComplexRangeStr;
StringRef LastComplexRangeOption;
- auto setComplexRange = [&](StringRef NewOption,
- LangOptions::ComplexRangeKind NewRange) {
- // Warn if user overrides the previously set complex number
- // multiplication/division option.
- if (Range != LangOptions::ComplexRangeKind::CX_None && Range != NewRange)
- EmitComplexRangeDiag(D, LastComplexRangeOption, Range, NewOption,
- NewRange);
- LastComplexRangeOption = NewOption;
- Range = NewRange;
- };
-
// Lambda to set fast-math options. This is also used by -ffp-model=fast
auto applyFastMath = [&](bool Aggressive, StringRef CallerOption) {
if (Aggressive) {
HonorINFs = false;
HonorNaNs = false;
- setComplexRange(CallerOption, LangOptions::ComplexRangeKind::CX_Basic);
+ setComplexRange(D, CallerOption, LangOptions::ComplexRangeKind::CX_Basic,
+ LastComplexRangeOption, Range);
} else {
HonorINFs = true;
HonorNaNs = true;
- setComplexRange(CallerOption, LangOptions::ComplexRangeKind::CX_Promoted);
+ setComplexRange(D, CallerOption,
+ LangOptions::ComplexRangeKind::CX_Promoted,
+ LastComplexRangeOption, Range);
}
MathErrno = false;
AssociativeMath = true;
@@ -2887,18 +2843,24 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
default: continue;
case options::OPT_fcx_limited_range:
- setComplexRange(A->getSpelling(),
- LangOptions::ComplexRangeKind::CX_Basic);
+ setComplexRange(D, A->getSpelling(),
+ LangOptions::ComplexRangeKind::CX_Basic,
+ LastComplexRangeOption, Range);
break;
case options::OPT_fno_cx_limited_range:
- setComplexRange(A->getSpelling(), LangOptions::ComplexRangeKind::CX_Full);
+ setComplexRange(D, A->getSpelling(),
+ LangOptions::ComplexRangeKind::CX_Full,
+ LastComplexRangeOption, Range);
break;
case options::OPT_fcx_fortran_rules:
- setComplexRange(A->getSpelling(),
- LangOptions::ComplexRangeKind::CX_Improved);
+ setComplexRange(D, A->getSpelling(),
+ LangOptions::ComplexRangeKind::CX_Improved,
+ LastComplexRangeOption, Range);
break;
case options::OPT_fno_cx_fortran_rules:
- setComplexRange(A->getSpelling(), LangOptions::ComplexRangeKind::CX_Full);
+ setComplexRange(D, A->getSpelling(),
+ LangOptions::ComplexRangeKind::CX_Full,
+ LastComplexRangeOption, Range);
break;
case options::OPT_fcomplex_arithmetic_EQ: {
LangOptions::ComplexRangeKind RangeVal;
@@ -2916,7 +2878,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
<< A->getSpelling() << Val;
break;
}
- setComplexRange(Args.MakeArgString(A->getSpelling() + Val), RangeVal);
+ setComplexRange(D, Args.MakeArgString(A->getSpelling() + Val), RangeVal,
+ LastComplexRangeOption, Range);
break;
}
case options::OPT_ffp_model_EQ: {
@@ -2956,8 +2919,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
FPModel = Val;
FPContract = "on";
LastFpContractOverrideOption = "-ffp-model=precise";
- setComplexRange(Args.MakeArgString(A->getSpelling() + Val),
- LangOptions::ComplexRangeKind::CX_Full);
+ setComplexRange(D, Args.MakeArgString(A->getSpelling() + Val),
+ LangOptions::ComplexRangeKind::CX_Full,
+ LastComplexRangeOption, Range);
} else if (Val == "strict") {
StrictFPModel = true;
FPExceptionBehavior = "strict";
@@ -2966,8 +2930,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
LastFpContractOverrideOption = "-ffp-model=strict";
TrappingMath = true;
RoundingFPMath = true;
- setComplexRange(Args.MakeArgString(A->getSpelling() + Val),
- LangOptions::ComplexRangeKind::CX_Full);
+ setComplexRange(D, Args.MakeArgString(A->getSpelling() + Val),
+ LangOptions::ComplexRangeKind::CX_Full,
+ LastComplexRangeOption, Range);
} else
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
@@ -3174,8 +3139,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
SignedZeros = true;
restoreFPContractState();
if (Range != LangOptions::ComplexRangeKind::CX_Full)
- setComplexRange(A->getSpelling(),
- LangOptions::ComplexRangeKind::CX_None);
+ setComplexRange(D, A->getSpelling(),
+ LangOptions::ComplexRangeKind::CX_None,
+ LastComplexRangeOption, Range);
else
Range = LangOptions::ComplexRangeKind::CX_None;
LastComplexRangeOption = "";
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index cce4f64..49ee53f 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -3557,3 +3557,51 @@ tools::renderComplexRangeOption(LangOptionsBase::ComplexRangeKind Range) {
return "-complex-range=" + ComplexRangeStr;
return ComplexRangeStr;
}
+
+static void emitComplexRangeDiag(const Driver &D, StringRef LastOpt,
+ LangOptions::ComplexRangeKind Range,
+ StringRef NewOpt,
+ LangOptions::ComplexRangeKind NewRange) {
+ // Do not emit a warning if NewOpt overrides LastOpt in the following cases.
+ //
+ // | LastOpt | NewOpt |
+ // |-----------------------|-----------------------|
+ // | -fcx-limited-range | -fno-cx-limited-range |
+ // | -fno-cx-limited-range | -fcx-limited-range |
+ // | -fcx-fortran-rules | -fno-cx-fortran-rules |
+ // | -fno-cx-fortran-rules | -fcx-fortran-rules |
+ // | -ffast-math | -fno-fast-math |
+ // | -ffp-model= | -ffast-math |
+ // | -ffp-model= | -fno-fast-math |
+ // | -ffp-model= | -ffp-model= |
+ // | -fcomplex-arithmetic= | -fcomplex-arithmetic= |
+ if (LastOpt == NewOpt || NewOpt.empty() || LastOpt.empty() ||
+ (LastOpt == "-fcx-limited-range" && NewOpt == "-fno-cx-limited-range") ||
+ (LastOpt == "-fno-cx-limited-range" && NewOpt == "-fcx-limited-range") ||
+ (LastOpt == "-fcx-fortran-rules" && NewOpt == "-fno-cx-fortran-rules") ||
+ (LastOpt == "-fno-cx-fortran-rules" && NewOpt == "-fcx-fortran-rules") ||
+ (LastOpt == "-ffast-math" && NewOpt == "-fno-fast-math") ||
+ (LastOpt.starts_with("-ffp-model=") && NewOpt == "-ffast-math") ||
+ (LastOpt.starts_with("-ffp-model=") && NewOpt == "-fno-fast-math") ||
+ (LastOpt.starts_with("-ffp-model=") &&
+ NewOpt.starts_with("-ffp-model=")) ||
+ (LastOpt.starts_with("-fcomplex-arithmetic=") &&
+ NewOpt.starts_with("-fcomplex-arithmetic=")))
+ return;
+
+ D.Diag(clang::diag::warn_drv_overriding_complex_range)
+ << LastOpt << NewOpt << complexRangeKindToStr(Range)
+ << complexRangeKindToStr(NewRange);
+}
+
+void tools::setComplexRange(const Driver &D, StringRef NewOpt,
+ LangOptions::ComplexRangeKind NewRange,
+ StringRef &LastOpt,
+ LangOptions::ComplexRangeKind &Range) {
+ // Warn if user overrides the previously set complex number
+ // multiplication/division option.
+ if (Range != LangOptions::ComplexRangeKind::CX_None && Range != NewRange)
+ emitComplexRangeDiag(D, LastOpt, Range, NewOpt, NewRange);
+ LastOpt = NewOpt;
+ Range = NewRange;
+}
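
With the diagnostic hoisted here, tools::setComplexRange is the single entry point that both Clang.cpp (above) and Flang.cpp (below) use, threading each caller's LastComplexRangeOption/Range locals through by reference. A minimal call-pattern sketch; D stands for the caller's Driver reference and the options are chosen for illustration:

    // Sketch of the intended call pattern inside a driver function.
    StringRef LastComplexRangeOption;
    LangOptions::ComplexRangeKind Range = LangOptions::ComplexRangeKind::CX_None;

    // First option seen: Range is CX_None, so no override warning fires.
    setComplexRange(D, "-fcx-limited-range",
                    LangOptions::ComplexRangeKind::CX_Basic,
                    LastComplexRangeOption, Range);

    // A later conflicting option warns via warn_drv_overriding_complex_range,
    // since (-fcx-limited-range, -fcomplex-arithmetic=full) is not in the
    // allow-list table above and CX_Basic != CX_Full.
    setComplexRange(D, "-fcomplex-arithmetic=full",
                    LangOptions::ComplexRangeKind::CX_Full,
                    LastComplexRangeOption, Range);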
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index a539481..a56fa41 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -693,6 +693,7 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args,
bool AssociativeMath = false;
bool ReciprocalMath = false;
+ StringRef LastComplexRangeOption;
LangOptions::ComplexRangeKind Range = LangOptions::ComplexRangeKind::CX_None;
if (const Arg *A = Args.getLastArg(options::OPT_ffp_contract)) {
@@ -720,17 +721,22 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args,
continue;
case options::OPT_fcomplex_arithmetic_EQ: {
+ LangOptions::ComplexRangeKind NewRange;
StringRef Val = A->getValue();
if (Val == "full")
- Range = LangOptions::ComplexRangeKind::CX_Full;
+ NewRange = LangOptions::ComplexRangeKind::CX_Full;
else if (Val == "improved")
- Range = LangOptions::ComplexRangeKind::CX_Improved;
+ NewRange = LangOptions::ComplexRangeKind::CX_Improved;
else if (Val == "basic")
- Range = LangOptions::ComplexRangeKind::CX_Basic;
+ NewRange = LangOptions::ComplexRangeKind::CX_Basic;
else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
+ break;
}
+
+ setComplexRange(D, Args.MakeArgString(A->getSpelling() + Val), NewRange,
+ LastComplexRangeOption, Range);
break;
}
case options::OPT_fhonor_infinities:
@@ -779,6 +785,9 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args,
ApproxFunc = true;
SignedZeros = false;
FPContract = "fast";
+ setComplexRange(D, A->getSpelling(),
+ LangOptions::ComplexRangeKind::CX_Basic,
+ LastComplexRangeOption, Range);
break;
case options::OPT_fno_fast_math:
HonorINFs = true;
@@ -792,6 +801,9 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args,
// --ffp-contract=off -fno-fast-math --> -ffp-contract=off
if (FPContract == "fast")
FPContract = "";
+ setComplexRange(D, A->getSpelling(),
+ LangOptions::ComplexRangeKind::CX_None,
+ LastComplexRangeOption, Range);
break;
}
@@ -810,6 +822,9 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args,
complexRangeKindToStr(Range)));
}
+ if (Args.hasArg(options::OPT_fno_fast_real_mod))
+ CmdArgs.push_back("-fno-fast-real-mod");
+
if (!HonorINFs && !HonorNaNs && AssociativeMath && ReciprocalMath &&
ApproxFunc && !SignedZeros &&
(FPContract == "fast" || FPContract.empty())) {
diff --git a/clang/lib/Driver/ToolChains/HLSL.cpp b/clang/lib/Driver/ToolChains/HLSL.cpp
index f4858e4..2869549 100644
--- a/clang/lib/Driver/ToolChains/HLSL.cpp
+++ b/clang/lib/Driver/ToolChains/HLSL.cpp
@@ -64,7 +64,7 @@ bool isLegalShaderModel(Triple &T) {
} break;
case Triple::EnvironmentType::RootSignature:
VersionTuple MinVer(1, 0);
- VersionTuple MaxVer(1, 1);
+ VersionTuple MaxVer(1, 2);
return MinVer <= Version && Version <= MaxVer;
}
return false;
diff --git a/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp b/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
index 1087eb3..6966d40 100644
--- a/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
+++ b/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
@@ -444,8 +444,7 @@ bool ExtractAPIAction::PrepareToExecuteAction(CompilerInstance &CI) {
return true;
if (!CI.hasFileManager())
- if (!CI.createFileManager())
- return false;
+ CI.createFileManager();
auto Kind = Inputs[0].getKind();
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 9413c13..cd4c1aa 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -368,7 +368,7 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// If binary operators are moved to the next line (including commas for some
// styles of constructor initializers), that's always ok.
- if (!Current.isOneOf(TT_BinaryOperator, tok::comma) &&
+ if (Current.isNoneOf(TT_BinaryOperator, tok::comma) &&
// Allow breaking opening brace of lambdas (when passed as function
// arguments) to a new line when BeforeLambdaBody brace wrapping is
// enabled.
@@ -445,7 +445,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
(!Style.BreakBeforeTernaryOperators &&
Previous.is(TT_ConditionalExpr))) &&
CurrentState.BreakBeforeParameter && !Current.isTrailingComment() &&
- !Current.isOneOf(tok::r_paren, tok::r_brace)) {
+ Current.isNoneOf(tok::r_paren, tok::r_brace)) {
return true;
}
if (CurrentState.IsChainedConditional &&
@@ -523,9 +523,9 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (Style.AlwaysBreakBeforeMultilineStrings &&
(NewLineColumn == State.FirstIndent + Style.ContinuationIndentWidth ||
Previous.is(tok::comma) || Current.NestingLevel < 2) &&
- !Previous.isOneOf(tok::kw_return, tok::lessless, tok::at,
+ Previous.isNoneOf(tok::kw_return, tok::lessless, tok::at,
Keywords.kw_dollar) &&
- !Previous.isOneOf(TT_InlineASMColon, TT_ConditionalExpr) &&
+ Previous.isNoneOf(TT_InlineASMColon, TT_ConditionalExpr) &&
nextIsMultilineString(State)) {
return true;
}
@@ -648,7 +648,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// into the ColumnLimit, they are checked here in the ContinuationIndenter.
if (Style.ColumnLimit != 0 && Previous.is(BK_Block) &&
Previous.is(tok::l_brace) &&
- !Current.isOneOf(tok::r_brace, tok::comment)) {
+ Current.isNoneOf(tok::r_brace, tok::comment)) {
return true;
}
@@ -752,7 +752,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
return false;
const auto *Next = Comma->getNextNonComment();
- return Next && !Next->isOneOf(TT_LambdaLSquare, tok::l_brace, tok::caret);
+ return Next && Next->isNoneOf(TT_LambdaLSquare, tok::l_brace, tok::caret);
};
if (DisallowLineBreaks())
@@ -835,7 +835,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
return Tok.is(tok::l_brace) && Tok.isNot(BK_Block) &&
Style.Cpp11BracedListStyle;
};
- if (!Tok.isOneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) &&
+ if (Tok.isNoneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) &&
!IsStartOfBracedList()) {
return false;
}
@@ -843,7 +843,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
return true;
if (Tok.Previous->isIf())
return Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak;
- return !Tok.Previous->isOneOf(TT_CastRParen, tok::kw_for, tok::kw_while,
+ return Tok.Previous->isNoneOf(TT_CastRParen, tok::kw_for, tok::kw_while,
tok::kw_switch) &&
!(Style.isJavaScript() && Tok.Previous->is(Keywords.kw_await));
};
@@ -882,8 +882,8 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
Tok.isOneOf(tok::ellipsis, Keywords.kw_await))) {
return true;
}
- const auto *Previous = Tok.Previous;
- if (!Previous || (!Previous->isOneOf(TT_FunctionDeclarationLParen,
+ if (const auto *Previous = Tok.Previous;
+ !Previous || (Previous->isNoneOf(TT_FunctionDeclarationLParen,
TT_LambdaDefinitionLParen) &&
!IsFunctionCallParen(*Previous))) {
return true;
@@ -920,9 +920,9 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// align the commas with the opening paren.
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
!CurrentState.IsCSharpGenericTypeConstraint && Previous.opensScope() &&
- Previous.isNot(TT_ObjCMethodExpr) && Previous.isNot(TT_RequiresClause) &&
- Previous.isNot(TT_TableGenDAGArgOpener) &&
- Previous.isNot(TT_TableGenDAGArgOpenerToBreak) &&
+ Previous.isNoneOf(TT_ObjCMethodExpr, TT_RequiresClause,
+ TT_TableGenDAGArgOpener,
+ TT_TableGenDAGArgOpenerToBreak) &&
!(Current.MacroParent && Previous.MacroParent) &&
(Current.isNot(TT_LineComment) ||
Previous.isOneOf(BK_BracedInit, TT_VerilogMultiLineListLParen)) &&
@@ -962,7 +962,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
if (Current.isNot(tok::comment) && P &&
(P->isOneOf(TT_BinaryOperator, tok::comma) ||
(P->is(TT_ConditionalExpr) && P->is(tok::colon))) &&
- !P->isOneOf(TT_OverloadedOperator, TT_CtorInitializerComma) &&
+ P->isNoneOf(TT_OverloadedOperator, TT_CtorInitializerComma) &&
P->getPrecedence() != prec::Assignment &&
P->getPrecedence() != prec::Relational &&
P->getPrecedence() != prec::Spaceship) {
@@ -992,7 +992,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// parameter, i.e. let nested calls have a continuation indent.
CurrentState.LastSpace = State.Column;
CurrentState.NestedBlockIndent = State.Column;
- } else if (!Current.isOneOf(tok::comment, tok::caret) &&
+ } else if (Current.isNoneOf(tok::comment, tok::caret) &&
((Previous.is(tok::comma) &&
Previous.isNot(TT_OverloadedOperator)) ||
(Previous.is(tok::colon) && Previous.is(TT_ObjCMethodExpr)))) {
@@ -1099,7 +1099,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
if (Current.isNot(TT_LambdaArrow) &&
(!Style.isJavaScript() || Current.NestingLevel != 0 ||
!PreviousNonComment || PreviousNonComment->isNot(tok::equal) ||
- !Current.isOneOf(Keywords.kw_async, Keywords.kw_function))) {
+ Current.isNoneOf(Keywords.kw_async, Keywords.kw_function))) {
CurrentState.NestedBlockIndent = State.Column;
}
@@ -1239,11 +1239,11 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
}
if (PreviousNonComment &&
- !PreviousNonComment->isOneOf(tok::comma, tok::colon, tok::semi) &&
+ PreviousNonComment->isNoneOf(tok::comma, tok::colon, tok::semi) &&
((PreviousNonComment->isNot(TT_TemplateCloser) &&
!PreviousNonComment->ClosesRequiresClause) ||
Current.NestingLevel != 0) &&
- !PreviousNonComment->isOneOf(
+ PreviousNonComment->isNoneOf(
TT_BinaryOperator, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
TT_LeadingJavaAnnotation) &&
Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope() &&
@@ -1281,8 +1281,8 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
bool AllowAllConstructorInitializersOnNextLine =
Style.PackConstructorInitializers == FormatStyle::PCIS_NextLine ||
Style.PackConstructorInitializers == FormatStyle::PCIS_NextLineOnly;
- if (!(Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) ||
- PreviousIsBreakingCtorInitializerColon) ||
+ if ((Previous.isNoneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) &&
+ !PreviousIsBreakingCtorInitializerColon) ||
(!Style.AllowAllParametersOfDeclarationOnNextLine &&
State.Line->MustBeDeclaration) ||
(!Style.AllowAllArgumentsOnNextLine &&
@@ -1576,7 +1576,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (Previous.is(tok::r_paren) &&
Previous.isNot(TT_TableGenDAGArgOperatorToBreak) &&
!Current.isBinaryOperator() &&
- !Current.isOneOf(tok::colon, tok::comment)) {
+ Current.isNoneOf(tok::colon, tok::comment)) {
return ContinuationIndent;
}
if (Current.is(TT_ProtoExtensionLSquare))
@@ -1591,7 +1591,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
NextNonComment->SpacesRequiredBefore;
}
if (CurrentState.Indent == State.FirstIndent && PreviousNonComment &&
- !PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma)) {
+ PreviousNonComment->isNoneOf(tok::r_brace, TT_CtorInitializerComma)) {
// Ensure that we fall back to the continuation indent width instead of
// just flushing continuations left.
return CurrentState.Indent + Style.ContinuationIndentWidth;
@@ -1734,7 +1734,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
}
if (Previous && (Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) ||
(Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) &&
- !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr,
+ Previous->isNoneOf(TT_DictLiteral, TT_ObjCMethodExpr,
TT_CtorInitializerColon)))) {
CurrentState.NestedBlockInlined =
!Newline && hasNestedBlockInlined(Previous, Current, Style);
@@ -1758,7 +1758,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
State.StartOfStringLiteral = State.Column + 1;
} else if (Current.isStringLiteral() && State.StartOfStringLiteral == 0) {
State.StartOfStringLiteral = State.Column;
- } else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
+ } else if (Current.isNoneOf(tok::comment, tok::identifier, tok::hash) &&
!Current.isStringLiteral()) {
State.StartOfStringLiteral = 0;
}
@@ -2057,7 +2057,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
// array literals as these follow different indentation rules.
bool NoLineBreak =
Current.Children.empty() &&
- !Current.isOneOf(TT_DictLiteral, TT_ArrayInitializerLSquare) &&
+ Current.isNoneOf(TT_DictLiteral, TT_ArrayInitializerLSquare) &&
(CurrentState.NoLineBreak || CurrentState.NoLineBreakInOperand ||
(Current.is(TT_TemplateOpener) &&
CurrentState.ContainsUnwrappedBuilder));
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 835071d..2bf6244 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -2435,7 +2435,7 @@ private:
const auto *NextLine = I + 1 == End ? nullptr : I[1];
for (const auto *Token = Line->First; Token && !Token->Finalized;
Token = Token->Next) {
- if (!Token->Optional || !Token->isOneOf(tok::l_brace, tok::r_brace))
+ if (!Token->Optional || Token->isNoneOf(tok::l_brace, tok::r_brace))
continue;
auto *Next = Token->Next;
assert(Next || Token == Line->Last);
diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp
index c60ae8f..c2956a1 100644
--- a/clang/lib/Format/FormatToken.cpp
+++ b/clang/lib/Format/FormatToken.cpp
@@ -108,7 +108,7 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
// Ensure that we start on the opening brace.
const FormatToken *LBrace =
State.NextToken->Previous->getPreviousNonComment();
- if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
+ if (!LBrace || LBrace->isNoneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
LBrace->is(BK_Block) || LBrace->is(TT_DictLiteral) ||
LBrace->Next->is(TT_DesignatedInitializerPeriod)) {
return 0;
@@ -177,7 +177,7 @@ static unsigned CodePointsBetween(const FormatToken *Begin,
void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// FIXME: At some point we might want to do this for other lists, too.
if (!Token->MatchingParen ||
- !Token->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) {
+ Token->isNoneOf(tok::l_brace, TT_ArrayInitializerLSquare)) {
return;
}
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index a28446a..e4ddd61 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -645,6 +645,9 @@ public:
return is(K1) || isOneOf(K2, Ks...);
}
template <typename T> bool isNot(T Kind) const { return !is(Kind); }
+ template <typename... Ts> bool isNoneOf(Ts... Ks) const {
+ return !isOneOf(Ks...);
+ }
bool isIf(bool AllowConstexprMacro = true) const {
return is(tok::kw_if) || endsSequence(tok::kw_constexpr, tok::kw_if) ||
@@ -748,7 +751,7 @@ public:
/// Returns \c true if this is a "." or "->" accessing a member.
bool isMemberAccess() const {
return isOneOf(tok::arrow, tok::period, tok::arrowstar) &&
- !isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow,
+ isNoneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow,
TT_LambdaArrow, TT_LeadingJavaAnnotation);
}
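
isNoneOf is the one substantive addition in this file; every `!isOneOf(...)` rewrite in the surrounding Format diffs leans on it. A standalone sketch of the contract (TokenSketch is a toy stand-in, not FormatToken):

    // Sketch: tok.isNoneOf(Ks...) == !tok.isOneOf(Ks...).
    #include <cassert>

    struct TokenSketch {
      int Kind;
      bool is(int K) const { return Kind == K; }
      template <typename K1, typename... Ts>
      bool isOneOf(K1 First, Ts... Rest) const {
        return is(First) || (... || is(Rest));
      }
      template <typename... Ts> bool isNoneOf(Ts... Ks) const {
        return !isOneOf(Ks...);
      }
    };

    int main() {
      TokenSketch T{2};
      assert(T.isOneOf(1, 2, 3));
      assert(T.isNoneOf(4, 5)); // reads better than !T.isOneOf(4, 5)
      assert(!T.isNoneOf(1, 2));
      return 0;
    }

Being a pure negation wrapper, it changes no behavior; the mechanical rewrites only drop the leading `!` at each call site.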
diff --git a/clang/lib/Format/FormatTokenLexer.cpp b/clang/lib/Format/FormatTokenLexer.cpp
index 3f4aa52..86a5185 100644
--- a/clang/lib/Format/FormatTokenLexer.cpp
+++ b/clang/lib/Format/FormatTokenLexer.cpp
@@ -733,7 +733,7 @@ void FormatTokenLexer::tryParseJavaTextBlock() {
// its text if successful.
void FormatTokenLexer::tryParseJSRegexLiteral() {
FormatToken *RegexToken = Tokens.back();
- if (!RegexToken->isOneOf(tok::slash, tok::slashequal))
+ if (RegexToken->isNoneOf(tok::slash, tok::slashequal))
return;
FormatToken *Prev = nullptr;
@@ -1041,7 +1041,7 @@ void FormatTokenLexer::handleTemplateStrings() {
void FormatTokenLexer::tryParsePythonComment() {
FormatToken *HashToken = Tokens.back();
- if (!HashToken->isOneOf(tok::hash, tok::hashhash))
+ if (HashToken->isNoneOf(tok::hash, tok::hashhash))
return;
// Turn the remainder of this line into a comment.
const char *CommentBegin =
diff --git a/clang/lib/Format/MacroExpander.cpp b/clang/lib/Format/MacroExpander.cpp
index 85a53c9..445e173 100644
--- a/clang/lib/Format/MacroExpander.cpp
+++ b/clang/lib/Format/MacroExpander.cpp
@@ -86,7 +86,7 @@ private:
}
bool parseExpansion() {
- if (!Current->isOneOf(tok::equal, tok::eof))
+ if (Current->isNoneOf(tok::equal, tok::eof))
return false;
if (Current->is(tok::equal))
nextToken();
diff --git a/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index 08f8d68..95ccfac 100644
--- a/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -70,7 +70,7 @@ std::string computeName(const FormatToken *NamespaceTok) {
// and closing parenthesis or comma.
assert(Tok && Tok->is(tok::l_paren) && "expected an opening parenthesis");
Tok = Tok->getNextNonComment();
- while (Tok && !Tok->isOneOf(tok::r_paren, tok::comma)) {
+ while (Tok && Tok->isNoneOf(tok::r_paren, tok::comma)) {
name += Tok->TokenText;
Tok = Tok->getNextNonComment();
}
@@ -85,7 +85,7 @@ std::string computeName(const FormatToken *NamespaceTok) {
// one token before that up until the '{'. A '(' might be a macro with
// arguments.
const FormatToken *FirstNSTok = nullptr;
- while (Tok && !Tok->isOneOf(tok::l_brace, tok::coloncolon, tok::l_paren)) {
+ while (Tok && Tok->isNoneOf(tok::l_brace, tok::coloncolon, tok::l_paren)) {
if (FirstNSTok)
FirstNSName += FirstNSTok->TokenText;
FirstNSTok = Tok;
diff --git a/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp b/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp
index b885942..b12b370 100644
--- a/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp
+++ b/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp
@@ -61,7 +61,7 @@ void ObjCPropertyAttributeOrderFixer::sortPropertyAttributes(
}
// Most attributes look like identifiers, but `class` is a keyword.
- if (!Tok->isOneOf(tok::identifier, tok::kw_class)) {
+ if (Tok->isNoneOf(tok::identifier, tok::kw_class)) {
// If we hit any other kind of token, just bail.
return;
}
diff --git a/clang/lib/Format/QualifierAlignmentFixer.cpp b/clang/lib/Format/QualifierAlignmentFixer.cpp
index 043d957..e3e30ca 100644
--- a/clang/lib/Format/QualifierAlignmentFixer.cpp
+++ b/clang/lib/Format/QualifierAlignmentFixer.cpp
@@ -508,7 +508,7 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
// Don't change declarations such as
// `foo(struct Foo const a);` -> `foo(struct Foo const a);`
- if (!Previous || !Previous->isOneOf(tok::kw_struct, tok::kw_class)) {
+ if (!Previous || Previous->isNoneOf(tok::kw_struct, tok::kw_class)) {
insertQualifierBefore(SourceMgr, Fixes, TypeToken, Qualifier);
removeToken(SourceMgr, Fixes, Tok);
}
diff --git a/clang/lib/Format/SortJavaScriptImports.cpp b/clang/lib/Format/SortJavaScriptImports.cpp
index ace3dff..a403a4f 100644
--- a/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/clang/lib/Format/SortJavaScriptImports.cpp
@@ -439,7 +439,7 @@ private:
// for grammar EBNF (production ModuleItem).
bool parseModuleReference(const AdditionalKeywords &Keywords,
JsModuleReference &Reference) {
- if (!Current || !Current->isOneOf(Keywords.kw_import, tok::kw_export))
+ if (!Current || Current->isNoneOf(Keywords.kw_import, tok::kw_export))
return false;
Reference.IsExport = Current->is(tok::kw_export);
@@ -570,7 +570,7 @@ private:
Symbol.Range.setEnd(Current->Tok.getLocation());
Reference.Symbols.push_back(Symbol);
- if (!Current->isOneOf(tok::r_brace, tok::comma))
+ if (Current->isNoneOf(tok::r_brace, tok::comma))
return false;
}
Reference.SymbolsEnd = Current->Tok.getLocation();
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 0c9c88a..59f81b3 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -203,7 +203,7 @@ private:
return false;
}
if (InExpr && SeenTernaryOperator &&
- (!Next || !Next->isOneOf(tok::l_paren, tok::l_brace))) {
+ (!Next || Next->isNoneOf(tok::l_paren, tok::l_brace))) {
return false;
}
if (!MaybeAngles)
@@ -577,7 +577,7 @@ private:
if (IsIf && CurrentToken->is(tok::semi)) {
for (auto *Tok = OpeningParen.Next;
Tok != CurrentToken &&
- !Tok->isOneOf(tok::equal, tok::l_paren, tok::l_brace);
+ Tok->isNoneOf(tok::equal, tok::l_paren, tok::l_brace);
Tok = Tok->Next) {
if (Tok->isPointerOrReference())
Tok->setFinalizedType(TT_PointerOrReference);
@@ -704,7 +704,7 @@ private:
!IsCppStructuredBinding && !InsideInlineASM && !CppArrayTemplates &&
IsCpp && !IsCpp11AttributeSpecifier && !IsCSharpAttributeSpecifier &&
Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
- !CurrentToken->isOneOf(tok::l_brace, tok::r_square) &&
+ CurrentToken->isNoneOf(tok::l_brace, tok::r_square) &&
(!Parent ||
Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
tok::kw_return, tok::kw_throw) ||
@@ -1334,7 +1334,7 @@ private:
if (Style.isJavaScript()) {
if (Contexts.back().ColonIsForRangeExpr || // colon in for loop
(Contexts.size() == 1 && // switch/case labels
- !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) ||
+ Line.First->isNoneOf(tok::kw_enum, tok::kw_case)) ||
Contexts.back().ContextKind == tok::l_paren || // function params
Contexts.back().ContextKind == tok::l_square || // array type
(!Contexts.back().IsExpression &&
@@ -1411,7 +1411,7 @@ private:
} else if (Contexts.back().ColonIsForRangeExpr) {
Tok->setType(TT_RangeBasedForLoopColon);
for (auto *Token = Prev;
- Token && !Token->isOneOf(tok::semi, tok::l_paren);
+ Token && Token->isNoneOf(tok::semi, tok::l_paren);
Token = Token->Previous) {
if (Token->isPointerOrReference())
Token->setFinalizedType(TT_PointerOrReference);
@@ -1425,7 +1425,7 @@ private:
Scopes.back() == ST_Class)) {
Tok->setType(TT_BitFieldColon);
} else if (Contexts.size() == 1 &&
- !Line.getFirstNonComment()->isOneOf(tok::kw_enum, tok::kw_case,
+ Line.getFirstNonComment()->isNoneOf(tok::kw_enum, tok::kw_case,
tok::kw_default) &&
!Line.startsWith(tok::kw_typedef, tok::kw_enum)) {
if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept) ||
@@ -1562,10 +1562,10 @@ private:
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
!Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) &&
!Line.startsWith(tok::l_paren) &&
- !Tok->isOneOf(TT_TypeDeclarationParen, TT_RequiresExpressionLParen)) {
+ Tok->isNoneOf(TT_TypeDeclarationParen, TT_RequiresExpressionLParen)) {
if (!Prev ||
(!Prev->isAttribute() &&
- !Prev->isOneOf(TT_RequiresClause, TT_LeadingJavaAnnotation,
+ Prev->isNoneOf(TT_RequiresClause, TT_LeadingJavaAnnotation,
TT_BinaryOperator))) {
Line.MightBeFunctionDecl = true;
Tok->MightBeFunctionDeclParen = true;
@@ -1664,7 +1664,7 @@ private:
}
}
while (CurrentToken &&
- !CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
+ CurrentToken->isNoneOf(tok::l_paren, tok::semi, tok::r_paren)) {
if (CurrentToken->isOneOf(tok::star, tok::amp))
CurrentToken->setType(TT_PointerOrReference);
auto Next = CurrentToken->getNextNonComment();
@@ -1728,8 +1728,8 @@ private:
// cond ? id : "B";
// cond ? cond2 ? "A" : "B" : "C";
if (!Contexts.back().IsExpression && Line.MustBeDeclaration &&
- (!Next || !Next->isOneOf(tok::identifier, tok::string_literal) ||
- !Next->Next || !Next->Next->isOneOf(tok::colon, tok::question))) {
+ (!Next || Next->isNoneOf(tok::identifier, tok::string_literal) ||
+ !Next->Next || Next->Next->isNoneOf(tok::colon, tok::question))) {
Tok->setType(TT_CSharpNullable);
break;
}
@@ -1796,7 +1796,7 @@ private:
if (!parseTableGenValue())
return false;
} else if (Tok->isOneOf(Keywords.kw_def, Keywords.kw_defm) &&
- (!Next || !Next->isOneOf(tok::colon, tok::l_brace))) {
+ (!Next || Next->isNoneOf(tok::colon, tok::l_brace))) {
      // This is the NameValue case.
if (!parseTableGenValue(true))
return false;
@@ -2094,7 +2094,7 @@ private:
// Reset token type in case we have already looked at it and then
// recovered from an error (e.g. failure to find the matching >).
if (!CurrentToken->isTypeFinalized() &&
- !CurrentToken->isOneOf(
+ CurrentToken->isNoneOf(
TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro, TT_IfMacro,
TT_ForEachMacro, TT_TypenameMacro, TT_FunctionLBrace,
TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_FatArrow,
@@ -2230,7 +2230,7 @@ private:
// type or non-type.
if (Contexts.back().ContextKind == tok::less) {
assert(Current.Previous->Previous);
- return !Current.Previous->Previous->isOneOf(tok::kw_typename,
+ return Current.Previous->Previous->isNoneOf(tok::kw_typename,
tok::kw_class);
}
@@ -2266,7 +2266,7 @@ private:
if (!Line.startsWith(TT_UnaryOperator)) {
for (FormatToken *Previous = Current.Previous;
Previous && Previous->Previous &&
- !Previous->Previous->isOneOf(tok::comma, tok::semi);
+ Previous->Previous->isNoneOf(tok::comma, tok::semi);
Previous = Previous->Previous) {
if (Previous->isOneOf(tok::r_square, tok::r_paren, tok::greater)) {
Previous = Previous->MatchingParen;
@@ -2430,7 +2430,7 @@ private:
Current.setType(TT_BinaryOperator);
} else if (Current.is(tok::arrow) && AutoFound &&
Line.MightBeFunctionDecl && Current.NestingLevel == 0 &&
- !Current.Previous->isOneOf(tok::kw_operator, tok::identifier)) {
+ Current.Previous->isNoneOf(tok::kw_operator, tok::identifier)) {
// not auto operator->() -> xxx;
Current.setType(TT_TrailingReturnArrow);
} else if (Current.is(tok::arrow) && Current.Previous &&
@@ -2511,7 +2511,7 @@ private:
Current.setType(TT_CastRParen);
if (Current.MatchingParen && Current.Next &&
!Current.Next->isBinaryOperator() &&
- !Current.Next->isOneOf(
+ Current.Next->isNoneOf(
tok::semi, tok::colon, tok::l_brace, tok::l_paren, tok::comma,
tok::period, tok::arrow, tok::coloncolon, tok::kw_noexcept)) {
if (FormatToken *AfterParen = Current.MatchingParen->Next;
@@ -2569,7 +2569,7 @@ private:
} else if (Current.isOneOf(tok::identifier, tok::kw_const, tok::kw_noexcept,
tok::kw_requires) &&
Current.Previous &&
- !Current.Previous->isOneOf(tok::equal, tok::at,
+ Current.Previous->isNoneOf(tok::equal, tok::at,
TT_CtorInitializerComma,
TT_CtorInitializerColon) &&
Line.MightBeFunctionDecl && Contexts.size() == 1) {
@@ -2658,7 +2658,7 @@ private:
if (PreviousNotConst->is(TT_TemplateCloser)) {
return PreviousNotConst && PreviousNotConst->MatchingParen &&
PreviousNotConst->MatchingParen->Previous &&
- !PreviousNotConst->MatchingParen->Previous->isOneOf(
+ PreviousNotConst->MatchingParen->Previous->isNoneOf(
tok::period, tok::kw_template);
}
@@ -2780,7 +2780,7 @@ private:
// If there is an identifier (or with a few exceptions a keyword) right
// before the parentheses, this is unlikely to be a cast.
if (LeftOfParens->Tok.getIdentifierInfo() &&
- !LeftOfParens->isOneOf(Keywords.kw_in, tok::kw_return, tok::kw_case,
+ LeftOfParens->isNoneOf(Keywords.kw_in, tok::kw_return, tok::kw_case,
tok::kw_delete, tok::kw_throw)) {
return false;
}
@@ -2918,7 +2918,7 @@ private:
const bool NextIsAmpOrStar = AfterRParen->isOneOf(tok::amp, tok::star);
if (!(AfterRParen->isUnaryOperator() || NextIsAmpOrStar) ||
AfterRParen->is(tok::plus) ||
- !AfterRParen->Next->isOneOf(tok::identifier, tok::numeric_constant)) {
+ AfterRParen->Next->isNoneOf(tok::identifier, tok::numeric_constant)) {
return false;
}
@@ -2948,7 +2948,7 @@ private:
// Search for unexpected tokens.
for (Prev = BeforeRParen; Prev != LParen; Prev = Prev->Previous)
- if (!Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon))
+ if (Prev->isNoneOf(tok::kw_const, tok::identifier, tok::coloncolon))
return false;
return true;
@@ -3740,7 +3740,7 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
const bool InRequiresExpression = Line.Type == LT_RequiresExpression;
for (auto &Child : Line.Children) {
if (InRequiresExpression &&
- !Child->First->isOneOf(tok::kw_typename, tok::kw_requires,
+ Child->First->isNoneOf(tok::kw_typename, tok::kw_requires,
TT_CompoundRequirementLBrace)) {
Child->Type = LT_SimpleRequirement;
}
@@ -3857,7 +3857,7 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
// Find parentheses of parameter list.
if (Current.is(tok::kw_operator)) {
if (Previous.Tok.getIdentifierInfo() &&
- !Previous.isOneOf(tok::kw_return, tok::kw_co_return)) {
+ Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) {
return true;
}
if (Previous.is(tok::r_paren) && Previous.is(TT_TypeDeclarationParen)) {
@@ -4328,7 +4328,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// Slightly prefer formatting local lambda definitions like functions.
if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal))
return 35;
- if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
+ if (Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_ArrayInitializerLSquare,
TT_DesignatedInitializerLSquare, TT_AttributeSquare)) {
return 500;
@@ -4519,7 +4519,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const FormatToken &Left,
const FormatToken &Right) const {
if (Left.is(tok::kw_return) &&
- !Right.isOneOf(tok::semi, tok::r_paren, tok::hashhash)) {
+ Right.isNoneOf(tok::semi, tok::r_paren, tok::hashhash)) {
return true;
}
if (Left.is(tok::kw_throw) && Right.is(tok::l_paren) && Right.MatchingParen &&
@@ -4579,7 +4579,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
}
// co_await (x), co_yield (x), co_return (x)
if (Left.isOneOf(tok::kw_co_await, tok::kw_co_yield, tok::kw_co_return) &&
- !Right.isOneOf(tok::semi, tok::r_paren)) {
+ Right.isNoneOf(tok::semi, tok::r_paren)) {
return true;
}
@@ -4656,7 +4656,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return getTokenPointerOrReferenceAlignment(Right) !=
FormatStyle::PAS_Left;
}
- return !Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
+ return Left.isNoneOf(TT_PointerOrReference, tok::l_paren) &&
(getTokenPointerOrReferenceAlignment(Right) !=
FormatStyle::PAS_Left ||
(Line.IsMultiVariableDeclStmt &&
@@ -4729,7 +4729,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const auto *LParen = Right.Next->MatchingParen;
return !LParen || LParen->isNot(TT_FunctionTypeLParen);
}
- return !BeforeLeft->isOneOf(tok::l_paren, tok::l_square);
+ return BeforeLeft->isNoneOf(tok::l_paren, tok::l_square);
}
// Ensure right pointer alignment with ellipsis e.g. int *...P
if (Left.is(tok::ellipsis) && BeforeLeft &&
@@ -4808,10 +4808,10 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
TT_LambdaLSquare)));
}
if (Right.is(tok::l_square) &&
- !Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
+ Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_DesignatedInitializerLSquare,
TT_StructuredBindingLSquare, TT_AttributeSquare) &&
- !Left.isOneOf(tok::numeric_constant, TT_DictLiteral) &&
+ Left.isNoneOf(tok::numeric_constant, TT_DictLiteral) &&
!(Left.isNot(tok::r_square) && Style.SpaceBeforeSquareBrackets &&
Right.is(TT_ArraySubscriptLSquare))) {
return false;
@@ -4894,7 +4894,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
spaceRequiredBeforeParens(Right);
}
- if (!BeforeLeft || !BeforeLeft->isOneOf(tok::period, tok::arrow)) {
+ if (!BeforeLeft || BeforeLeft->isNoneOf(tok::period, tok::arrow)) {
if (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch)) {
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
@@ -4917,7 +4917,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::at) && Right.isNot(tok::objc_not_keyword))
return false;
if (Right.is(TT_UnaryOperator)) {
- return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) &&
+ return Left.isNoneOf(tok::l_paren, tok::l_square, tok::at) &&
(Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr));
}
// No space between the variable name and the initializer list.
@@ -5260,7 +5260,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::ellipsis))
return false;
if (Left.is(TT_TemplateCloser) &&
- !Right.isOneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square,
+ Right.isNoneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square,
Keywords.kw_implements, Keywords.kw_extends)) {
// Type assertions ('<type>expr') are not followed by whitespace. Other
// locations that should have whitespace following are identified by the
@@ -5299,7 +5299,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Add space between things in a primitive's state table unless in a
// transition like `(0?)`.
if ((Left.is(TT_VerilogTableItem) &&
- !Right.isOneOf(tok::r_paren, tok::semi)) ||
+ Right.isNoneOf(tok::r_paren, tok::semi)) ||
(Right.is(TT_VerilogTableItem) && Left.isNot(tok::l_paren))) {
const FormatToken *Next = Right.getNextNonComment();
return !(Next && Next->is(tok::r_paren));
@@ -5348,8 +5348,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// previous rule.
if ((Right.is(Keywords.kw_apostrophe) ||
(Right.is(BK_BracedInit) && Right.is(tok::l_brace))) &&
- !(Left.isOneOf(Keywords.kw_assign, Keywords.kw_unique) ||
- Keywords.isVerilogWordOperator(Left)) &&
+ Left.isNoneOf(Keywords.kw_assign, Keywords.kw_unique) &&
+ !Keywords.isVerilogWordOperator(Left) &&
(Left.isOneOf(tok::r_square, tok::r_paren, tok::r_brace,
tok::numeric_constant) ||
Keywords.isWordLike(Left))) {
@@ -5549,14 +5549,14 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Right.hasWhitespaceBefore();
}
if (Right.is(tok::coloncolon) &&
- !Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren)) {
+ Left.isNoneOf(tok::l_brace, tok::comment, tok::l_paren)) {
// Put a space between < and :: in vector< ::std::string >
return (Left.is(TT_TemplateOpener) &&
((Style.Standard < FormatStyle::LS_Cpp11) ||
ShouldAddSpacesInAngles())) ||
- !(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
- tok::kw___super, TT_TemplateOpener,
- TT_TemplateCloser)) ||
+ Left.isNoneOf(tok::l_paren, tok::r_paren, tok::l_square,
+ tok::kw___super, TT_TemplateOpener,
+ TT_TemplateCloser) ||
(Left.is(tok::l_paren) && Style.SpacesInParensOptions.Other);
}
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
@@ -5567,7 +5567,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
}
// Space before TT_StructuredBindingLSquare.
if (Right.is(TT_StructuredBindingLSquare)) {
- return !Left.isOneOf(tok::amp, tok::ampamp) ||
+ return Left.isNoneOf(tok::amp, tok::ampamp) ||
getTokenReferenceAlignment(Left) != FormatStyle::PAS_Right;
}
// Space before & or && following a TT_StructuredBindingLSquare.
@@ -5599,7 +5599,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Returns 'true' if 'Tok' is a brace we'd want to break before in Allman style.
static bool isAllmanBrace(const FormatToken &Tok) {
return Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
- !Tok.isOneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral);
+ Tok.isNoneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral);
}
// Returns 'true' if 'Tok' is a function argument.
@@ -5617,7 +5617,7 @@ isEmptyLambdaAllowed(const FormatToken &Tok,
static bool isAllmanLambdaBrace(const FormatToken &Tok) {
return Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
- !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral);
+ Tok.isNoneOf(TT_ObjCBlockLBrace, TT_DictLiteral);
}
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
@@ -5686,7 +5686,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
tok::kw_const) &&
// kw_var/kw_let are pseudo-tokens that are tok::identifier, so match
// above.
- !Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let)) {
+ Line.First->isNoneOf(Keywords.kw_var, Keywords.kw_let)) {
// Object literals on the top level of a file are treated as "enum-style".
// Each key/value pair is put on a separate line, instead of bin-packing.
return true;
@@ -5831,7 +5831,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(tok::comment)) {
- return !Left.isOneOf(BK_BracedInit, TT_CtorInitializerColon) &&
+ return Left.isNoneOf(BK_BracedInit, TT_CtorInitializerColon) &&
Right.NewlinesBefore > 0 && Right.HasUnescapedNewline;
}
if (Left.isTrailingComment())
@@ -5873,7 +5873,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
case FormatStyle::RCPS_WithPreceding:
return Right.isNot(tok::semi);
case FormatStyle::RCPS_OwnLineWithBrace:
- return !Right.isOneOf(tok::semi, tok::l_brace);
+ return Right.isNoneOf(tok::semi, tok::l_brace);
default:
break;
}
@@ -6000,7 +6000,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// Put multiple Java annotations on a new line.
if ((Style.isJava() || Style.isJavaScript()) &&
Left.is(TT_LeadingJavaAnnotation) &&
- !Right.isOneOf(TT_LeadingJavaAnnotation, tok::l_paren) &&
+ Right.isNoneOf(TT_LeadingJavaAnnotation, tok::l_paren) &&
(Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations)) {
return true;
}
@@ -6206,7 +6206,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false;
// Avoid breaking after '(' when it is part of a bang operator.
if (Right.is(tok::l_paren)) {
- return !Left.isOneOf(TT_TableGenBangOperator, TT_TableGenCondOperator,
+ return Left.isNoneOf(TT_TableGenBangOperator, TT_TableGenCondOperator,
TT_TemplateCloser);
}
// Avoid breaking between the value and its suffix part.
@@ -6294,7 +6294,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(tok::colon) &&
- !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon,
+ Right.isNoneOf(TT_CtorInitializerColon, TT_InlineASMColon,
TT_BitFieldColon)) {
return false;
}
@@ -6378,7 +6378,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
}
if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator, tok::kw_operator))
return false;
- if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) &&
+ if (Left.is(tok::equal) && Right.isNoneOf(tok::kw_default, tok::kw_delete) &&
Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0) {
return false;
}
@@ -6405,7 +6405,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// Allow breaking after a trailing annotation, e.g. after a method
// declaration.
if (Left.is(TT_TrailingAnnotation)) {
- return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
+ return Right.isNoneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
tok::less, tok::coloncolon);
}
@@ -6448,7 +6448,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(tok::kw_typename) && Left.isNot(tok::kw_const))
return true;
if ((Left.isBinaryOperator() || Left.is(TT_BinaryOperator)) &&
- !Left.isOneOf(tok::arrowstar, tok::lessless) &&
+ Left.isNoneOf(tok::arrowstar, tok::lessless) &&
Style.BreakBeforeBinaryOperators != FormatStyle::BOS_All &&
(Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None ||
Left.getPrecedence() == prec::Assignment)) {
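
The recurring edit in TokenAnnotator.cpp swaps `!Tok.isOneOf(...)` for `Tok.isNoneOf(...)`. A minimal sketch of the equivalence, using a hypothetical toy token type rather than clang's real FormatToken:

    // Toy model only: clang's FormatToken offers richer matching; this
    // sketch just shows the De Morgan relationship the rewrite relies on.
    #include <cstdlib>

    enum class Kind { LParen, RParen, Semi, Comment };

    struct Tok {
      Kind K;
      template <typename... Ts> bool isOneOf(Ts... Ks) const {
        return ((K == Ks) || ...);  // true if K matches any argument
      }
      template <typename... Ts> bool isNoneOf(Ts... Ks) const {
        return !isOneOf(Ks...);     // none-of is the negation of any-of
      }
    };

    int main() {
      Tok T{Kind::Semi};
      bool Same = T.isNoneOf(Kind::LParen, Kind::RParen) ==
                  !T.isOneOf(Kind::LParen, Kind::RParen);
      return Same ? EXIT_SUCCESS : EXIT_FAILURE;
    }

The payoff is readability: the negation is named instead of carried by a leading `!` that is easy to overlook in long formatting conditions.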
diff --git a/clang/lib/Format/UnwrappedLineFormatter.cpp b/clang/lib/Format/UnwrappedLineFormatter.cpp
index ac9d147..ac9c81d 100644
--- a/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -506,7 +506,7 @@ private:
(NextLine.First->is(tok::r_brace) &&
!Style.BraceWrapping.SplitEmptyRecord);
} else if (TheLine->InPPDirective ||
- !TheLine->First->isOneOf(tok::kw_class, tok::kw_enum,
+ TheLine->First->isNoneOf(tok::kw_class, tok::kw_enum,
tok::kw_struct)) {
// Try to merge a block with left brace unwrapped that wasn't yet
// covered.
@@ -686,8 +686,8 @@ private:
}
Limit = limitConsideringMacros(I + 1, E, Limit);
AnnotatedLine &Line = **I;
- if (Line.First->isNot(tok::kw_do) && Line.First->isNot(tok::kw_else) &&
- Line.Last->isNot(tok::kw_else) && Line.Last->isNot(tok::r_paren)) {
+ if (Line.First->isNoneOf(tok::kw_do, tok::kw_else) &&
+ Line.Last->isNoneOf(tok::kw_else, tok::r_paren)) {
return 0;
}
// Only merge `do while` if `do` is the only statement on the line.
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index 6948b3d..2879743 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -405,7 +405,7 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
case tok::r_brace:
if (OpeningBrace) {
if (!Style.RemoveBracesLLVM || Line->InPPDirective ||
- !OpeningBrace->isOneOf(TT_ControlStatementLBrace, TT_ElseLBrace)) {
+ OpeningBrace->isNoneOf(TT_ControlStatementLBrace, TT_ElseLBrace)) {
return false;
}
if (FormatTok->isNot(tok::r_brace) || StatementCount != 1 || HasLabel ||
@@ -427,7 +427,7 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
unsigned StoredPosition = Tokens->getPosition();
auto *Next = Tokens->getNextNonComment();
FormatTok = Tokens->setPosition(StoredPosition);
- if (!Next->isOneOf(tok::colon, tok::arrow)) {
+ if (Next->isNoneOf(tok::colon, tok::arrow)) {
// default not followed by `:` or `->` is not a case label; treat it
// like an identifier.
parseStructuralElement();
@@ -584,7 +584,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
ProbablyBracedList =
ProbablyBracedList ||
(NextTok->is(tok::identifier) &&
- !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace));
+ PrevTok->isNoneOf(tok::semi, tok::r_brace, tok::l_brace));
ProbablyBracedList = ProbablyBracedList ||
(NextTok->is(tok::semi) &&
@@ -607,7 +607,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// A statement can end with only `;` (simple statement), a block
// closing brace (compound statement), or `:` (label statement).
// If PrevTok is a block opening brace, Tok ends an empty block.
- !PrevTok->isOneOf(tok::semi, BK_Block, tok::colon)) {
+ PrevTok->isNoneOf(tok::semi, BK_Block, tok::colon)) {
ProbablyBracedList = true;
}
}
@@ -1157,7 +1157,7 @@ void UnwrappedLineParser::parsePPDefine() {
IncludeGuard = IG_Defined;
IncludeGuardToken = nullptr;
for (auto &Line : Lines) {
- if (!Line.Tokens.front().Tok->isOneOf(tok::comment, tok::hash)) {
+ if (Line.Tokens.front().Tok->isNoneOf(tok::comment, tok::hash)) {
IncludeGuard = IG_Rejected;
break;
}
@@ -1233,7 +1233,7 @@ void UnwrappedLineParser::parsePPUnknown() {
static bool tokenCanStartNewLine(const FormatToken &Tok) {
// Semicolon can be a null-statement, l_square can be a start of a macro or
// a C++11 attribute, but this doesn't seem to be common.
- return !Tok.isOneOf(tok::semi, tok::l_brace,
+ return Tok.isNoneOf(tok::semi, tok::l_brace,
// Tokens that can only be used as binary operators and a
// part of overloaded operator names.
tok::period, tok::periodstar, tok::arrow, tok::arrowstar,
@@ -1256,7 +1256,7 @@ static bool mustBeJSIdent(const AdditionalKeywords &Keywords,
// FIXME: This returns true for C/C++ keywords like 'struct'.
return FormatTok->is(tok::identifier) &&
(!FormatTok->Tok.getIdentifierInfo() ||
- !FormatTok->isOneOf(
+ FormatTok->isNoneOf(
Keywords.kw_in, Keywords.kw_of, Keywords.kw_as, Keywords.kw_async,
Keywords.kw_await, Keywords.kw_yield, Keywords.kw_finally,
Keywords.kw_function, Keywords.kw_import, Keywords.kw_is,
@@ -1322,7 +1322,7 @@ static bool isC78ParameterDecl(const FormatToken *Tok, const FormatToken *Next,
return false;
if (!isC78Type(*Tok) &&
- !Tok->isOneOf(tok::kw_register, tok::kw_struct, tok::kw_union)) {
+ Tok->isNoneOf(tok::kw_register, tok::kw_struct, tok::kw_union)) {
return false;
}
@@ -1345,7 +1345,7 @@ bool UnwrappedLineParser::parseModuleImport() {
if (auto Token = Tokens->peekNextToken(/*SkipComment=*/true);
!Token->Tok.getIdentifierInfo() &&
- !Token->isOneOf(tok::colon, tok::less, tok::string_literal)) {
+ Token->isNoneOf(tok::colon, tok::less, tok::string_literal)) {
return false;
}
@@ -1357,7 +1357,7 @@ bool UnwrappedLineParser::parseModuleImport() {
// Handle import <foo/bar.h> as we would an include statement.
else if (FormatTok->is(tok::less)) {
nextToken();
- while (!FormatTok->isOneOf(tok::semi, tok::greater) && !eof()) {
+ while (FormatTok->isNoneOf(tok::semi, tok::greater) && !eof()) {
// Mark tokens up to the trailing line comments as implicit string
// literals.
if (FormatTok->isNot(tok::comment) &&
@@ -2394,13 +2394,13 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
const auto *BeforeRParen = Previous->getPreviousNonComment();
// Lambdas can be cast to function types only, e.g. `std::function<int()>`
// and `int (*)()`.
- if (!BeforeRParen || !BeforeRParen->isOneOf(tok::greater, tok::r_paren))
+ if (!BeforeRParen || BeforeRParen->isNoneOf(tok::greater, tok::r_paren))
return false;
} else if (Previous->is(tok::star)) {
Previous = Previous->getPreviousNonComment();
}
if (Previous && Previous->Tok.getIdentifierInfo() &&
- !Previous->isOneOf(tok::kw_return, tok::kw_co_await, tok::kw_co_yield,
+ Previous->isNoneOf(tok::kw_return, tok::kw_co_await, tok::kw_co_yield,
tok::kw_co_return)) {
return false;
}
@@ -2450,7 +2450,7 @@ void UnwrappedLineParser::tryToParseJSFunction() {
if (FormatTok->is(tok::l_brace))
tryToParseBracedList();
else
- while (!FormatTok->isOneOf(tok::l_brace, tok::semi) && !eof())
+ while (FormatTok->isNoneOf(tok::l_brace, tok::semi) && !eof())
nextToken();
}
@@ -3108,11 +3108,11 @@ void UnwrappedLineParser::parseTryCatch() {
for (bool SeenCatch = false;;) {
if (FormatTok->is(tok::at))
nextToken();
- if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except,
- tok::kw___finally, tok::objc_catch,
- tok::objc_finally) ||
- ((Style.isJava() || Style.isJavaScript()) &&
- FormatTok->is(Keywords.kw_finally)))) {
+ if (FormatTok->isNoneOf(tok::kw_catch, Keywords.kw___except,
+ tok::kw___finally, tok::objc_catch,
+ tok::objc_finally) &&
+ !((Style.isJava() || Style.isJavaScript()) &&
+ FormatTok->is(Keywords.kw_finally))) {
break;
}
if (FormatTok->is(tok::kw_catch))
@@ -3290,7 +3290,7 @@ void UnwrappedLineParser::parseForOrWhileLoop(bool HasParens) {
Keywords.kw_repeat))) &&
"'for', 'while' or foreach macro expected");
const bool KeepBraces = !Style.RemoveBracesLLVM ||
- !FormatTok->isOneOf(tok::kw_for, tok::kw_while);
+ FormatTok->isNoneOf(tok::kw_for, tok::kw_while);
nextToken();
// JS' for await ( ...
@@ -4339,7 +4339,7 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
// to the terminating `;`. For everything else, just return and continue
// parsing the structural element, i.e. the declaration or expression for
// `export default`.
- if (!IsImport && !FormatTok->isOneOf(tok::l_brace, tok::star) &&
+ if (!IsImport && FormatTok->isNoneOf(tok::l_brace, tok::star) &&
!FormatTok->isStringLiteral() &&
!(FormatTok->is(Keywords.kw_type) &&
Tokens->peekNextToken()->isOneOf(tok::l_brace, tok::star))) {
@@ -4886,7 +4886,7 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
const auto *Next = Tokens->peekNextToken();
if ((Style.isVerilog() && !Keywords.isVerilogPPDirective(*Next)) ||
(Style.isTableGen() &&
- !Next->isOneOf(tok::kw_else, tok::pp_define, tok::pp_ifdef,
+ Next->isNoneOf(tok::kw_else, tok::pp_define, tok::pp_ifdef,
tok::pp_ifndef, tok::pp_endif))) {
break;
}
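
The parseTryCatch() hunk above is the one place where the rewrite is more than mechanical: `!(A || (B && C))` becomes `isNoneOf(...) && !(B && C)` by De Morgan. A tiny self-contained check of the identity being relied on:

    #include <cassert>

    int main() {
      // Exhaustively verify !(a || b) == (!a && !b) over all inputs.
      for (int a = 0; a <= 1; ++a)
        for (int b = 0; b <= 1; ++b)
          assert(!(a || b) == (!a && !b));
      return 0;
    }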
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 30c06bb..54f366f 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -462,7 +462,7 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
if ((Style.PointerAlignment == FormatStyle::PAS_Right ||
Style.ReferenceAlignment == FormatStyle::RAS_Right) &&
CurrentChange.Spaces != 0 &&
- !CurrentChange.Tok->isOneOf(tok::equal, tok::r_paren,
+ CurrentChange.Tok->isNoneOf(tok::equal, tok::r_paren,
TT_TemplateCloser)) {
const bool ReferenceNotRightAligned =
Style.ReferenceAlignment != FormatStyle::RAS_Right &&
diff --git a/clang/lib/Frontend/ChainedIncludesSource.cpp b/clang/lib/Frontend/ChainedIncludesSource.cpp
index 82249f8..049277c 100644
--- a/clang/lib/Frontend/ChainedIncludesSource.cpp
+++ b/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -129,7 +129,7 @@ clang::createChainedIncludesSource(CompilerInstance &CI,
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().getTargetOpts()));
Clang->createFileManager();
- Clang->createSourceManager(Clang->getFileManager());
+ Clang->createSourceManager();
Clang->createPreprocessor(TU_Prefix);
Clang->getDiagnosticClient().BeginSourceFile(Clang->getLangOpts(),
&Clang->getPreprocessor());
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index b1fb905..5844366 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -382,17 +382,18 @@ IntrusiveRefCntPtr<DiagnosticsEngine> CompilerInstance::createDiagnostics(
// File Manager
-FileManager *CompilerInstance::createFileManager() {
+void CompilerInstance::createFileManager() {
assert(VFS && "CompilerInstance needs a VFS for creating FileManager");
FileMgr = llvm::makeIntrusiveRefCnt<FileManager>(getFileSystemOpts(), VFS);
- return FileMgr.get();
}
// Source Manager
-void CompilerInstance::createSourceManager(FileManager &FileMgr) {
- SourceMgr =
- llvm::makeIntrusiveRefCnt<SourceManager>(getDiagnostics(), FileMgr);
+void CompilerInstance::createSourceManager() {
+ assert(Diagnostics && "DiagnosticsEngine needed for creating SourceManager");
+ assert(FileMgr && "FileManager needed for creating SourceManager");
+ SourceMgr = llvm::makeIntrusiveRefCnt<SourceManager>(getDiagnostics(),
+ getFileManager());
}
// Initialize the remapping of files to alternative contents, e.g.,
@@ -1186,7 +1187,7 @@ std::unique_ptr<CompilerInstance> CompilerInstance::cloneForModuleCompileImpl(
if (llvm::is_contained(DiagOpts.SystemHeaderWarningsModules, ModuleName))
Instance.getDiagnostics().setSuppressSystemWarnings(false);
- Instance.createSourceManager(Instance.getFileManager());
+ Instance.createSourceManager();
SourceManager &SourceMgr = Instance.getSourceManager();
if (ThreadSafeConfig) {
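
After this change, createFileManager() returns nothing and createSourceManager() takes no argument; the instance wires its own stored FileManager through. A minimal sketch of the resulting setup order, assuming a CompilerInstance that already has a VFS and a DiagnosticsEngine (the new asserts enforce exactly that); the helper name is hypothetical:

    #include "clang/Frontend/CompilerInstance.h"

    // Sketch only: mirrors the call sequence used in FrontendAction below.
    void setUpManagers(clang::CompilerInstance &CI) {
      if (!CI.hasFileManager())
        CI.createFileManager();   // stores the FileManager; no return value
      if (!CI.hasSourceManager())
        CI.createSourceManager(); // reads the stored FileManager internally
    }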
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index 6cc3b65..1b63c40 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -879,7 +879,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// file, otherwise the CompilerInstance will happily destroy them.
CI.setVirtualFileSystem(AST->getFileManager().getVirtualFileSystemPtr());
CI.setFileManager(AST->getFileManagerPtr());
- CI.createSourceManager(CI.getFileManager());
+ CI.createSourceManager();
CI.getSourceManager().initializeForReplay(AST->getSourceManager());
// Preload all the module files loaded transitively by the AST unit. Also
@@ -971,13 +971,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// Set up the file system, file and source managers, if needed.
if (!CI.hasVirtualFileSystem())
CI.createVirtualFileSystem();
- if (!CI.hasFileManager()) {
- if (!CI.createFileManager()) {
- return false;
- }
- }
+ if (!CI.hasFileManager())
+ CI.createFileManager();
if (!CI.hasSourceManager()) {
- CI.createSourceManager(CI.getFileManager());
+ CI.createSourceManager();
if (CI.getDiagnosticOpts().getFormat() == DiagnosticOptions::SARIF) {
static_cast<SARIFDiagnosticPrinter *>(&CI.getDiagnosticClient())
->setSarifWriter(
diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp
index ae09f70..238c5e2 100644
--- a/clang/lib/Lex/HeaderSearch.cpp
+++ b/clang/lib/Lex/HeaderSearch.cpp
@@ -2077,7 +2077,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
llvm::SmallString<32> FilePath = File;
if (!WorkingDir.empty() && !path::is_absolute(FilePath))
- fs::make_absolute(WorkingDir, FilePath);
+ path::make_absolute(WorkingDir, FilePath);
// remove_dots switches to backslashes on windows as a side-effect!
// We always want to suggest forward slashes for includes.
// (not remove_dots(..., posix) as that misparses windows paths).
@@ -2091,7 +2091,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
// `BestPrefixLength` accordingly.
auto CheckDir = [&](llvm::SmallString<32> Dir) -> bool {
if (!WorkingDir.empty() && !path::is_absolute(Dir))
- fs::make_absolute(WorkingDir, Dir);
+ path::make_absolute(WorkingDir, Dir);
path::remove_dots(Dir, /*remove_dot_dot=*/true);
for (auto NI = path::begin(File), NE = path::end(File),
DI = path::begin(Dir), DE = path::end(Dir);
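
Both hunks switch from the sys::fs spelling to sys::path::make_absolute, the overload that joins a relative path onto an explicit base directory as a pure string operation, without consulting the process working directory. A small sketch of the pattern used here (the helper name is hypothetical):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"

    llvm::SmallString<128> absolutize(llvm::StringRef WorkingDir,
                                      llvm::StringRef File) {
      llvm::SmallString<128> Path(File);
      if (!WorkingDir.empty() && !llvm::sys::path::is_absolute(Path))
        llvm::sys::path::make_absolute(WorkingDir, Path); // string-level join
      return Path;
    }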
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 22c01c4..d6cd7eb 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -2083,6 +2083,9 @@ void Parser::SkipMalformedDecl() {
return;
break;
+ case tok::kw_extern:
+ // 'extern' at the start of a line is almost certainly a good
+ // place to pick back up parsing.
case tok::kw_namespace:
// 'namespace' at the start of a line is almost certainly a good
// place to pick back up parsing, except in an Objective-C
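
The new case simply reuses the 'namespace' recovery logic by falling through. A toy sketch of the resynchronization idea, with hypothetical names:

    #include <string>

    // Sketch only: decide whether a keyword at the start of a line is a
    // good place to resume parsing after a malformed declaration.
    bool isRecoveryPoint(const std::string &Tok, bool AtLineStart) {
      return AtLineStart && (Tok == "extern" || Tok == "namespace");
    }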
diff --git a/clang/lib/Parse/ParseHLSLRootSignature.cpp b/clang/lib/Parse/ParseHLSLRootSignature.cpp
index 3b16efb..7be6eec 100644
--- a/clang/lib/Parse/ParseHLSLRootSignature.cpp
+++ b/clang/lib/Parse/ParseHLSLRootSignature.cpp
@@ -485,6 +485,9 @@ std::optional<StaticSampler> RootSignatureParser::parseStaticSampler() {
if (Params->Visibility.has_value())
Sampler.Visibility = Params->Visibility.value();
+ if (Params->Flags.has_value())
+ Sampler.Flags = Params->Flags.value();
+
return Sampler;
}
@@ -926,6 +929,20 @@ RootSignatureParser::parseStaticSamplerParams() {
if (!Visibility.has_value())
return std::nullopt;
Params.Visibility = Visibility;
+ } else if (tryConsumeExpectedToken(TokenKind::kw_flags)) {
+ // `flags` `=` STATIC_SAMPLER_FLAGS
+ if (Params.Flags.has_value()) {
+ reportDiag(diag::err_hlsl_rootsig_repeat_param) << CurToken.TokKind;
+ return std::nullopt;
+ }
+
+ if (consumeExpectedToken(TokenKind::pu_equal))
+ return std::nullopt;
+
+ auto Flags = parseStaticSamplerFlags(TokenKind::kw_flags);
+ if (!Flags.has_value())
+ return std::nullopt;
+ Params.Flags = Flags;
} else {
consumeNextToken(); // let diagnostic be at the start of invalid token
reportDiag(diag::err_hlsl_invalid_token)
@@ -1255,6 +1272,50 @@ RootSignatureParser::parseDescriptorRangeFlags(TokenKind Context) {
return Flags;
}
+std::optional<llvm::dxbc::StaticSamplerFlags>
+RootSignatureParser::parseStaticSamplerFlags(TokenKind Context) {
+ assert(CurToken.TokKind == TokenKind::pu_equal &&
+ "Expects to only be invoked starting at given keyword");
+
+ // Handle the edge-case of '0' to specify no flags set
+ if (tryConsumeExpectedToken(TokenKind::int_literal)) {
+ if (!verifyZeroFlag()) {
+ reportDiag(diag::err_hlsl_rootsig_non_zero_flag);
+ return std::nullopt;
+ }
+ return llvm::dxbc::StaticSamplerFlags::None;
+ }
+
+ TokenKind Expected[] = {
+#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) TokenKind::en_##NAME,
+#include "clang/Lex/HLSLRootSignatureTokenKinds.def"
+ };
+
+ std::optional<llvm::dxbc::StaticSamplerFlags> Flags;
+
+ do {
+ if (tryConsumeExpectedToken(Expected)) {
+ switch (CurToken.TokKind) {
+#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) \
+ case TokenKind::en_##NAME: \
+ Flags = maybeOrFlag<llvm::dxbc::StaticSamplerFlags>( \
+ Flags, llvm::dxbc::StaticSamplerFlags::NAME); \
+ break;
+#include "clang/Lex/HLSLRootSignatureTokenKinds.def"
+ default:
+ llvm_unreachable("Switch for consumed enum token was not provided");
+ }
+ } else {
+ consumeNextToken(); // consume token to point at invalid token
+ reportDiag(diag::err_hlsl_invalid_token)
+ << /*value=*/1 << /*value of*/ Context;
+ return std::nullopt;
+ }
+ } while (tryConsumeExpectedToken(TokenKind::pu_or));
+
+ return Flags;
+}
+
std::optional<uint32_t> RootSignatureParser::handleUIntLiteral() {
// Parse the numeric value and do semantic checks on its specification
clang::NumericLiteralParser Literal(
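
parseStaticSamplerFlags() follows the same shape as the existing flag parsers: accept a literal `0` for no flags, otherwise parse `A | B | ...`, OR-ing each enum into an optional accumulator via maybeOrFlag. A self-contained model of that accumulation (toy enum and hypothetical helper name):

    #include <cstdint>
    #include <optional>

    enum class Flags : uint32_t { None = 0, A = 1, B = 2 };

    // Models maybeOrFlag: start from empty when unset, then OR in each bit.
    std::optional<Flags> maybeOr(std::optional<Flags> Acc, Flags F) {
      uint32_t Base = Acc ? static_cast<uint32_t>(*Acc) : 0u;
      return static_cast<Flags>(Base | static_cast<uint32_t>(F));
    }

    int main() {
      std::optional<Flags> Acc;      // nullopt until the first flag parses
      Acc = maybeOr(Acc, Flags::A);  // `A`
      Acc = maybeOr(Acc, Flags::B);  // `| B`
      return static_cast<uint32_t>(*Acc) == 3u ? 0 : 1;
    }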
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
index 97a6a7f..3c20ccd 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp
@@ -138,7 +138,16 @@ private:
// LastStmt - refers to the last statement in the method body; referencing
// LastStmt will remove the statement from the method body since
// it will be linked from the new expression being constructed.
- enum class PlaceHolder { _0, _1, _2, _3, _4, Handle = 128, LastStmt };
+ enum class PlaceHolder {
+ _0,
+ _1,
+ _2,
+ _3,
+ _4,
+ Handle = 128,
+ CounterHandle,
+ LastStmt
+ };
Expr *convertPlaceholder(PlaceHolder PH);
Expr *convertPlaceholder(LocalVar &Var);
@@ -178,10 +187,14 @@ public:
template <typename ResourceT, typename ValueT>
BuiltinTypeMethodBuilder &setHandleFieldOnResource(ResourceT ResourceRecord,
ValueT HandleValue);
+ template <typename T>
+ BuiltinTypeMethodBuilder &
+ accessCounterHandleFieldOnResource(T ResourceRecord);
template <typename T> BuiltinTypeMethodBuilder &returnValue(T ReturnValue);
BuiltinTypeMethodBuilder &returnThis();
BuiltinTypeDeclBuilder &finalize();
Expr *getResourceHandleExpr();
+ Expr *getResourceCounterHandleExpr();
private:
void createDecl();
@@ -346,6 +359,8 @@ TemplateParameterListBuilder::finalizeTemplateArgs(ConceptDecl *CD) {
Expr *BuiltinTypeMethodBuilder::convertPlaceholder(PlaceHolder PH) {
if (PH == PlaceHolder::Handle)
return getResourceHandleExpr();
+ if (PH == PlaceHolder::CounterHandle)
+ return getResourceCounterHandleExpr();
if (PH == PlaceHolder::LastStmt) {
assert(!StmtsList.empty() && "no statements in the list");
@@ -467,6 +482,18 @@ Expr *BuiltinTypeMethodBuilder::getResourceHandleExpr() {
OK_Ordinary);
}
+Expr *BuiltinTypeMethodBuilder::getResourceCounterHandleExpr() {
+ ensureCompleteDecl();
+
+ ASTContext &AST = DeclBuilder.SemaRef.getASTContext();
+ CXXThisExpr *This = CXXThisExpr::Create(
+ AST, SourceLocation(), Method->getFunctionObjectParameterType(), true);
+ FieldDecl *HandleField = DeclBuilder.getResourceCounterHandleField();
+ return MemberExpr::CreateImplicit(AST, This, false, HandleField,
+ HandleField->getType(), VK_LValue,
+ OK_Ordinary);
+}
+
BuiltinTypeMethodBuilder &
BuiltinTypeMethodBuilder::declareLocalVar(LocalVar &Var) {
ensureCompleteDecl();
@@ -584,6 +611,22 @@ BuiltinTypeMethodBuilder::setHandleFieldOnResource(ResourceT ResourceRecord,
}
template <typename T>
+BuiltinTypeMethodBuilder &
+BuiltinTypeMethodBuilder::accessCounterHandleFieldOnResource(T ResourceRecord) {
+ ensureCompleteDecl();
+
+ Expr *ResourceExpr = convertPlaceholder(ResourceRecord);
+
+ ASTContext &AST = DeclBuilder.SemaRef.getASTContext();
+ FieldDecl *HandleField = DeclBuilder.getResourceCounterHandleField();
+ MemberExpr *HandleExpr = MemberExpr::CreateImplicit(
+ AST, ResourceExpr, false, HandleField, HandleField->getType(), VK_LValue,
+ OK_Ordinary);
+ StmtsList.push_back(HandleExpr);
+ return *this;
+}
+
+template <typename T>
BuiltinTypeMethodBuilder &BuiltinTypeMethodBuilder::returnValue(T ReturnValue) {
ensureCompleteDecl();
@@ -722,8 +765,31 @@ BuiltinTypeDeclBuilder::addMemberVariable(StringRef Name, QualType Type,
return *this;
}
+BuiltinTypeDeclBuilder &
+BuiltinTypeDeclBuilder::addBufferHandles(ResourceClass RC, bool IsROV,
+ bool RawBuffer, bool HasCounter,
+ AccessSpecifier Access) {
+ addHandleMember(RC, IsROV, RawBuffer, Access);
+ if (HasCounter)
+ addCounterHandleMember(RC, IsROV, RawBuffer, Access);
+ return *this;
+}
+
BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addHandleMember(
ResourceClass RC, bool IsROV, bool RawBuffer, AccessSpecifier Access) {
+ return addResourceMember("__handle", RC, IsROV, RawBuffer,
+ /*IsCounter=*/false, Access);
+}
+
+BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCounterHandleMember(
+ ResourceClass RC, bool IsROV, bool RawBuffer, AccessSpecifier Access) {
+ return addResourceMember("__counter_handle", RC, IsROV, RawBuffer,
+ /*IsCounter=*/true, Access);
+}
+
+BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addResourceMember(
+ StringRef MemberName, ResourceClass RC, bool IsROV, bool RawBuffer,
+ bool IsCounter, AccessSpecifier Access) {
assert(!Record->isCompleteDefinition() && "record is already complete");
ASTContext &Ctx = SemaRef.getASTContext();
@@ -739,9 +805,12 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addHandleMember(
ElementTypeInfo
? HLSLContainedTypeAttr::CreateImplicit(Ctx, ElementTypeInfo)
: nullptr};
+ if (IsCounter)
+ Attrs.push_back(HLSLIsCounterAttr::CreateImplicit(Ctx));
+
if (CreateHLSLAttributedResourceType(SemaRef, Ctx.HLSLResourceTy, Attrs,
AttributedResTy))
- addMemberVariable("__handle", AttributedResTy, {}, Access);
+ addMemberVariable(MemberName, AttributedResTy, {}, Access);
return *this;
}
@@ -844,12 +913,17 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyConstructor() {
using PH = BuiltinTypeMethodBuilder::PlaceHolder;
- return BuiltinTypeMethodBuilder(*this, /*Name=*/"", AST.VoidTy,
- /*IsConst=*/false, /*IsCtor=*/true)
- .addParam("other", ConstRecordRefType)
+ BuiltinTypeMethodBuilder MMB(*this, /*Name=*/"", AST.VoidTy,
+ /*IsConst=*/false, /*IsCtor=*/true);
+ MMB.addParam("other", ConstRecordRefType)
.accessHandleFieldOnResource(PH::_0)
- .assign(PH::Handle, PH::LastStmt)
- .finalize();
+ .assign(PH::Handle, PH::LastStmt);
+
+ if (getResourceCounterHandleField())
+ MMB.accessCounterHandleFieldOnResource(PH::_0).assign(PH::CounterHandle,
+ PH::LastStmt);
+
+ return MMB.finalize();
}
BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyAssignmentOperator() {
@@ -863,12 +937,16 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyAssignmentOperator() {
using PH = BuiltinTypeMethodBuilder::PlaceHolder;
DeclarationName Name = AST.DeclarationNames.getCXXOperatorName(OO_Equal);
- return BuiltinTypeMethodBuilder(*this, Name, RecordRefType)
- .addParam("other", ConstRecordRefType)
+ BuiltinTypeMethodBuilder MMB(*this, Name, RecordRefType);
+ MMB.addParam("other", ConstRecordRefType)
.accessHandleFieldOnResource(PH::_0)
- .assign(PH::Handle, PH::LastStmt)
- .returnThis()
- .finalize();
+ .assign(PH::Handle, PH::LastStmt);
+
+ if (getResourceCounterHandleField())
+ MMB.accessCounterHandleFieldOnResource(PH::_0).assign(PH::CounterHandle,
+ PH::LastStmt);
+
+ return MMB.returnThis().finalize();
}
BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addArraySubscriptOperators() {
@@ -903,6 +981,14 @@ FieldDecl *BuiltinTypeDeclBuilder::getResourceHandleField() const {
return I->second;
}
+FieldDecl *BuiltinTypeDeclBuilder::getResourceCounterHandleField() const {
+ auto I = Fields.find("__counter_handle");
+ if (I == Fields.end() ||
+ !I->second->getType()->isHLSLAttributedResourceType())
+ return nullptr;
+ return I->second;
+}
+
QualType BuiltinTypeDeclBuilder::getFirstTemplateTypeParam() {
assert(Template && "record it not a template");
if (const auto *TTD = dyn_cast<TemplateTypeParmDecl>(
diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
index 9448af1..a981602 100644
--- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
+++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h
@@ -72,8 +72,9 @@ public:
AccessSpecifier Access = AccessSpecifier::AS_private);
BuiltinTypeDeclBuilder &
- addHandleMember(ResourceClass RC, bool IsROV, bool RawBuffer,
- AccessSpecifier Access = AccessSpecifier::AS_private);
+ addBufferHandles(ResourceClass RC, bool IsROV, bool RawBuffer,
+ bool HasCounter,
+ AccessSpecifier Access = AccessSpecifier::AS_private);
BuiltinTypeDeclBuilder &addArraySubscriptOperators();
// Builtin types constructors
@@ -95,7 +96,18 @@ public:
BuiltinTypeDeclBuilder &addConsumeMethod();
private:
+ BuiltinTypeDeclBuilder &addResourceMember(StringRef MemberName,
+ ResourceClass RC, bool IsROV,
+ bool RawBuffer, bool IsCounter,
+ AccessSpecifier Access);
+ BuiltinTypeDeclBuilder &
+ addHandleMember(ResourceClass RC, bool IsROV, bool RawBuffer,
+ AccessSpecifier Access = AccessSpecifier::AS_private);
+ BuiltinTypeDeclBuilder &
+ addCounterHandleMember(ResourceClass RC, bool IsROV, bool RawBuffer,
+ AccessSpecifier Access = AccessSpecifier::AS_private);
FieldDecl *getResourceHandleField() const;
+ FieldDecl *getResourceCounterHandleField() const;
QualType getFirstTemplateTypeParam();
QualType getHandleElementType();
Expr *getConstantIntExpr(int value);
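
addHandleMember/addCounterHandleMember move behind a single public addBufferHandles entry point, so every caller decides about the counter handle in one place. A toy model of that shape (illustrative names only, not the real builder):

    #include <string>
    #include <vector>

    struct ToyBuilder {
      std::vector<std::string> Fields;
      ToyBuilder &addBufferHandles(bool HasCounter) {
        addMember("__handle");            // always present
        if (HasCounter)
          addMember("__counter_handle");  // only for counter-bearing buffers
        return *this;
      }
    private:
      void addMember(std::string Name) { Fields.push_back(std::move(Name)); }
    };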
diff --git a/clang/lib/Sema/HLSLExternalSemaSource.cpp b/clang/lib/Sema/HLSLExternalSemaSource.cpp
index 464922b..cc43e94 100644
--- a/clang/lib/Sema/HLSLExternalSemaSource.cpp
+++ b/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -230,9 +230,9 @@ void HLSLExternalSemaSource::defineTrivialHLSLTypes() {
/// Set up common members and attributes for buffer types
static BuiltinTypeDeclBuilder setupBufferType(CXXRecordDecl *Decl, Sema &S,
ResourceClass RC, bool IsROV,
- bool RawBuffer) {
+ bool RawBuffer, bool HasCounter) {
return BuiltinTypeDeclBuilder(S, Decl)
- .addHandleMember(RC, IsROV, RawBuffer)
+ .addBufferHandles(RC, IsROV, RawBuffer, HasCounter)
.addDefaultHandleConstructor()
.addCopyConstructor()
.addCopyAssignmentOperator()
@@ -377,7 +377,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::SRV, /*IsROV=*/false,
- /*RawBuffer=*/false)
+ /*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
.completeDefinition();
@@ -389,7 +389,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
- /*RawBuffer=*/false)
+ /*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
.completeDefinition();
@@ -401,7 +401,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/true,
- /*RawBuffer=*/false)
+ /*RawBuffer=*/false, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
.completeDefinition();
@@ -412,7 +412,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::SRV, /*IsROV=*/false,
- /*RawBuffer=*/true)
+ /*RawBuffer=*/true, /*HasCounter=*/false)
.addArraySubscriptOperators()
.addLoadMethods()
.completeDefinition();
@@ -423,7 +423,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
- /*RawBuffer=*/true)
+ /*RawBuffer=*/true, /*HasCounter=*/true)
.addArraySubscriptOperators()
.addLoadMethods()
.addIncrementCounterMethod()
@@ -437,7 +437,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
- /*RawBuffer=*/true)
+ /*RawBuffer=*/true, /*HasCounter=*/true)
.addAppendMethod()
.completeDefinition();
});
@@ -448,7 +448,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
- /*RawBuffer=*/true)
+ /*RawBuffer=*/true, /*HasCounter=*/true)
.addConsumeMethod()
.completeDefinition();
});
@@ -459,7 +459,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/true,
- /*RawBuffer=*/true)
+ /*RawBuffer=*/true, /*HasCounter=*/true)
.addArraySubscriptOperators()
.addLoadMethods()
.addIncrementCounterMethod()
@@ -471,14 +471,14 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::SRV, /*IsROV=*/false,
- /*RawBuffer=*/true)
+ /*RawBuffer=*/true, /*HasCounter=*/false)
.completeDefinition();
});
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RWByteAddressBuffer")
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false,
- /*RawBuffer=*/true)
+ /*RawBuffer=*/true, /*HasCounter=*/false)
.completeDefinition();
});
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace,
@@ -486,7 +486,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
.finalizeForwardDeclaration();
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/true,
- /*RawBuffer=*/true)
+ /*RawBuffer=*/true, /*HasCounter=*/false)
.completeDefinition();
});
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 39c3aa2..7ce3513 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -14881,13 +14881,11 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
// Diag message shows element size in bits and in "bytes" (platform-
// dependent CharUnits)
DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
- PDiag(DiagID)
- << toString(index, 10, true) << AddrBits
- << (unsigned)ASTC.toBits(*ElemCharUnits)
- << toString(ElemBytes, 10, false)
- << toString(MaxElems, 10, false)
- << (unsigned)MaxElems.getLimitedValue(~0U)
- << IndexExpr->getSourceRange());
+ PDiag(DiagID) << index << AddrBits
+ << (unsigned)ASTC.toBits(*ElemCharUnits)
+ << ElemBytes << MaxElems
+ << MaxElems.getZExtValue()
+ << IndexExpr->getSourceRange());
const NamedDecl *ND = nullptr;
// Try harder to find a NamedDecl to point at in the note.
@@ -14970,10 +14968,10 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1;
QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType();
- DiagRuntimeBehavior(
- BaseExpr->getBeginLoc(), BaseExpr,
- PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar()
- << CastMsg << CastMsgTy << IndexExpr->getSourceRange());
+ DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
+ PDiag(DiagID)
+ << index << ArrayTy->desugar() << CastMsg
+ << CastMsgTy << IndexExpr->getSourceRange());
} else {
unsigned DiagID = diag::warn_array_index_precedes_bounds;
if (!ASE) {
@@ -14982,8 +14980,7 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
}
DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
- PDiag(DiagID) << toString(index, 10, true)
- << IndexExpr->getSourceRange());
+ PDiag(DiagID) << index << IndexExpr->getSourceRange());
}
const NamedDecl *ND = nullptr;
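
These hunks drop the manual toString() conversions, presumably because the diagnostic streaming interface can format the integral values itself. A toy model of that design (not clang's actual DiagnosticBuilder):

    #include <sstream>
    #include <string>

    struct ToyDiag {
      std::ostringstream OS;
      ToyDiag &operator<<(long long V) { OS << V; return *this; }
      ToyDiag &operator<<(const std::string &S) { OS << S; return *this; }
    };

    int main() {
      ToyDiag D;
      D << 42;  // the sink formats the number; callers stop pre-stringifying
      return D.OS.str() == "42" ? 0 : 1;
    }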
@@ -15946,7 +15943,7 @@ void Sema::RefersToMemberWithReducedAlignment(
}
// Check if the synthesized offset fulfills the alignment.
- if (Offset % ExpectedAlignment != 0 ||
+ if (!Offset.isMultipleOf(ExpectedAlignment) ||
// It may fulfill the offset, but the effective alignment may still be
// lower than the expected expression alignment.
CompleteObjectAlignment < ExpectedAlignment) {
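
The alignment check becomes `!Offset.isMultipleOf(ExpectedAlignment)`, which states the intent directly instead of spelling out the modulo. A small model with a plain integer wrapper (CharUnits itself is richer; this is only a sketch):

    #include <cstdint>

    struct Units {
      int64_t Q;
      // Guarding against zero here is a choice of this toy, not necessarily
      // of the real CharUnits implementation.
      bool isMultipleOf(Units N) const { return N.Q != 0 && Q % N.Q == 0; }
    };

    int main() {
      Units Offset{12}, Align{8};
      bool Misaligned = !Offset.isMultipleOf(Align); // same as 12 % 8 != 0
      return Misaligned ? 0 : 1;
    }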
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index dc6d232..8413090 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -12,9 +12,11 @@
#include "clang/Sema/SemaConcept.h"
#include "TreeTransform.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
@@ -27,7 +29,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringExtras.h"
-#include <optional>
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace sema;
@@ -85,7 +87,7 @@ public:
OK_Ordinary, Loc, FPOptionsOverride{});
}
};
-}
+} // namespace
bool Sema::CheckConstraintExpression(const Expr *ConstraintExpression,
Token NextToken, bool *PossibleNonPrimary,
@@ -146,14 +148,14 @@ bool Sema::CheckConstraintExpression(const Expr *ConstraintExpression,
if (!Context.hasSameUnqualifiedType(Type, Context.BoolTy)) {
Diag(ConstraintExpression->getExprLoc(),
- diag::err_non_bool_atomic_constraint) << Type
- << ConstraintExpression->getSourceRange();
+ diag::err_non_bool_atomic_constraint)
+ << Type << ConstraintExpression->getSourceRange();
CheckForNonPrimary();
return false;
}
if (PossibleNonPrimary)
- *PossibleNonPrimary = false;
+ *PossibleNonPrimary = false;
return true;
}
@@ -164,52 +166,315 @@ struct SatisfactionStackRAII {
SatisfactionStackRAII(Sema &SemaRef, const NamedDecl *ND,
const llvm::FoldingSetNodeID &FSNID)
: SemaRef(SemaRef) {
- if (ND) {
+ if (ND) {
SemaRef.PushSatisfactionStackEntry(ND, FSNID);
Inserted = true;
- }
+ }
}
~SatisfactionStackRAII() {
- if (Inserted)
- SemaRef.PopSatisfactionStackEntry();
+ if (Inserted)
+ SemaRef.PopSatisfactionStackEntry();
}
};
} // namespace
-static bool
-DiagRecursiveConstraintEval(Sema &S, llvm::FoldingSetNodeID &ID,
- const NamedDecl *Templ, const Expr *E,
- const MultiLevelTemplateArgumentList &MLTAL) {
+static bool DiagRecursiveConstraintEval(
+ Sema &S, llvm::FoldingSetNodeID &ID, const NamedDecl *Templ, const Expr *E,
+ const MultiLevelTemplateArgumentList *MLTAL = nullptr) {
E->Profile(ID, S.Context, /*Canonical=*/true);
- for (const auto &List : MLTAL)
- for (const auto &TemplateArg : List.Args)
- TemplateArg.Profile(ID, S.Context);
-
- // Note that we have to do this with our own collection, because there are
- // times where a constraint-expression check can cause us to need to evaluate
- // other constriants that are unrelated, such as when evaluating a recovery
- // expression, or when trying to determine the constexpr-ness of special
- // members. Otherwise we could just use the
- // Sema::InstantiatingTemplate::isAlreadyBeingInstantiated function.
+ if (MLTAL) {
+ for (const auto &List : *MLTAL)
+ for (const auto &TemplateArg : List.Args)
+ S.Context.getCanonicalTemplateArgument(TemplateArg)
+ .Profile(ID, S.Context);
+ }
if (S.SatisfactionStackContains(Templ, ID)) {
S.Diag(E->getExprLoc(), diag::err_constraint_depends_on_self)
<< E << E->getSourceRange();
return true;
}
-
return false;
}
-static ExprResult EvaluateAtomicConstraint(
- Sema &S, const Expr *AtomicExpr, const NamedDecl *Template,
- SourceLocation TemplateNameLoc, const MultiLevelTemplateArgumentList &MLTAL,
- ConstraintSatisfaction &Satisfaction) {
+// Figure out the depth, relative to the translation unit, of this function
+// declaration, to see whether two declarations differ by their constraints.
+// This isn't the same as getTemplateDepth, because it includes parents that
+// have already been instantiated.
+static unsigned
+CalculateTemplateDepthForConstraints(Sema &S, const NamedDecl *ND,
+ bool SkipForSpecialization = false) {
+ MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
+ ND, ND->getLexicalDeclContext(), /*Final=*/false,
+ /*Innermost=*/std::nullopt,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/true, SkipForSpecialization);
+ return MLTAL.getNumLevels();
+}
+
+namespace {
+class AdjustConstraintDepth : public TreeTransform<AdjustConstraintDepth> {
+ unsigned TemplateDepth = 0;
+
+public:
+ using inherited = TreeTransform<AdjustConstraintDepth>;
+ AdjustConstraintDepth(Sema &SemaRef, unsigned TemplateDepth)
+ : inherited(SemaRef), TemplateDepth(TemplateDepth) {}
+
+ using inherited::TransformTemplateTypeParmType;
+ QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL, bool) {
+ const TemplateTypeParmType *T = TL.getTypePtr();
+
+ TemplateTypeParmDecl *NewTTPDecl = nullptr;
+ if (TemplateTypeParmDecl *OldTTPDecl = T->getDecl())
+ NewTTPDecl = cast_or_null<TemplateTypeParmDecl>(
+ TransformDecl(TL.getNameLoc(), OldTTPDecl));
+
+ QualType Result = getSema().Context.getTemplateTypeParmType(
+ T->getDepth() + TemplateDepth, T->getIndex(), T->isParameterPack(),
+ NewTTPDecl);
+ TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+
+ bool AlreadyTransformed(QualType T) {
+ if (T.isNull())
+ return true;
+
+ if (T->isInstantiationDependentType() || T->isVariablyModifiedType() ||
+ T->containsUnexpandedParameterPack())
+ return false;
+ return true;
+ }
+};
+} // namespace
+
+namespace {
+
+// FIXME: Convert it to DynamicRecursiveASTVisitor
+class HashParameterMapping : public RecursiveASTVisitor<HashParameterMapping> {
+ using inherited = RecursiveASTVisitor<HashParameterMapping>;
+ friend inherited;
+
+ Sema &SemaRef;
+ const MultiLevelTemplateArgumentList &TemplateArgs;
+ llvm::FoldingSetNodeID &ID;
+ llvm::SmallVector<TemplateArgument, 10> UsedTemplateArgs;
+
+ UnsignedOrNone OuterPackSubstIndex;
+
+ TemplateArgument getPackSubstitutedTemplateArgument(TemplateArgument Arg) {
+ assert(*SemaRef.ArgPackSubstIndex < Arg.pack_size());
+ Arg = Arg.pack_begin()[*SemaRef.ArgPackSubstIndex];
+ if (Arg.isPackExpansion())
+ Arg = Arg.getPackExpansionPattern();
+ return Arg;
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+
+public:
+ HashParameterMapping(Sema &SemaRef,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ llvm::FoldingSetNodeID &ID,
+ UnsignedOrNone OuterPackSubstIndex)
+ : SemaRef(SemaRef), TemplateArgs(TemplateArgs), ID(ID),
+ OuterPackSubstIndex(OuterPackSubstIndex) {}
+
+ bool VisitTemplateTypeParmType(TemplateTypeParmType *T) {
+ // A lambda expression can introduce template parameters that don't have
+ // corresponding template arguments yet.
+ if (T->getDepth() >= TemplateArgs.getNumLevels())
+ return true;
+
+ TemplateArgument Arg = TemplateArgs(T->getDepth(), T->getIndex());
+
+ if (T->isParameterPack() && SemaRef.ArgPackSubstIndex) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ Arg = getPackSubstitutedTemplateArgument(Arg);
+ }
+
+ UsedTemplateArgs.push_back(
+ SemaRef.Context.getCanonicalTemplateArgument(Arg));
+ return true;
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ NamedDecl *D = E->getDecl();
+ NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D);
+ if (!NTTP)
+ return TraverseDecl(D);
+
+ TemplateArgument Arg = TemplateArgs(NTTP->getDepth(), NTTP->getPosition());
+ if (NTTP->isParameterPack() && SemaRef.ArgPackSubstIndex) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+ Arg = getPackSubstitutedTemplateArgument(Arg);
+ }
+
+ UsedTemplateArgs.push_back(
+ SemaRef.Context.getCanonicalTemplateArgument(Arg));
+ return true;
+ }
+
+ bool VisitTypedefType(TypedefType *TT) {
+ return inherited::TraverseType(TT->desugar());
+ }
+
+ bool TraverseDecl(Decl *D) {
+ if (auto *VD = dyn_cast<ValueDecl>(D))
+ return TraverseType(VD->getType());
+
+ return inherited::TraverseDecl(D);
+ }
+
+ bool TraverseTypeLoc(TypeLoc TL, bool TraverseQualifier = true) {
+ // We don't care about TypeLocs. So traverse Types instead.
+ return TraverseType(TL.getType(), TraverseQualifier);
+ }
+
+ bool TraverseTagType(const TagType *T, bool TraverseQualifier) {
+ // T's parent can be dependent while T doesn't have any template arguments.
+ // We should have already traversed its qualifier.
+ // FIXME: Add an assert to catch cases where we failed to profile the
+ // concept. assert(!T->isDependentType() && "We missed a case in profiling
+ // concepts!");
+ return true;
+ }
+
+ bool TraverseInjectedClassNameType(InjectedClassNameType *T,
+ bool TraverseQualifier) {
+ return TraverseTemplateArguments(T->getTemplateArgs(SemaRef.Context));
+ }
+
+ bool TraverseTemplateArgument(const TemplateArgument &Arg) {
+ if (!Arg.containsUnexpandedParameterPack() || Arg.isPackExpansion()) {
+ // Act as if we are fully expanding this pack, if it is a PackExpansion.
+ Sema::ArgPackSubstIndexRAII _1(SemaRef, std::nullopt);
+ llvm::SaveAndRestore<UnsignedOrNone> _2(OuterPackSubstIndex,
+ std::nullopt);
+ return inherited::TraverseTemplateArgument(Arg);
+ }
+
+ Sema::ArgPackSubstIndexRAII _1(SemaRef, OuterPackSubstIndex);
+ return inherited::TraverseTemplateArgument(Arg);
+ }
+
+ void VisitConstraint(const NormalizedConstraintWithParamMapping &Constraint) {
+ if (!Constraint.hasParameterMapping()) {
+ for (const auto &List : TemplateArgs)
+ for (const TemplateArgument &Arg : List.Args)
+ SemaRef.Context.getCanonicalTemplateArgument(Arg).Profile(
+ ID, SemaRef.Context);
+ return;
+ }
+
+ llvm::ArrayRef<TemplateArgumentLoc> Mapping =
+ Constraint.getParameterMapping();
+ for (auto &ArgLoc : Mapping) {
+ TemplateArgument Canonical =
+ SemaRef.Context.getCanonicalTemplateArgument(ArgLoc.getArgument());
+ // We don't want type sugar to interfere with the cache profile.
+ UsedTemplateArgs.push_back(Canonical);
+ TraverseTemplateArgument(Canonical);
+ }
+
+ for (auto &Used : UsedTemplateArgs) {
+ llvm::FoldingSetNodeID R;
+ Used.Profile(R, SemaRef.Context);
+ ID.AddNodeID(R);
+ }
+ }
+};
+
+class ConstraintSatisfactionChecker {
+ Sema &S;
+ const NamedDecl *Template;
+ SourceLocation TemplateNameLoc;
+ UnsignedOrNone PackSubstitutionIndex;
+
+ ConstraintSatisfaction &Satisfaction;
+
+private:
+ ExprResult
+ EvaluateAtomicConstraint(const Expr *AtomicExpr,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
+ UnsignedOrNone EvaluateFoldExpandedConstraintSize(
+ const FoldExpandedConstraint &FE,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
+ // XXX: It is SLOW! Use it very carefully.
+ std::optional<MultiLevelTemplateArgumentList> SubstitutionInTemplateArguments(
+ const NormalizedConstraintWithParamMapping &Constraint,
+ MultiLevelTemplateArgumentList MLTAL,
+ llvm::SmallVector<TemplateArgument> &SubstitutedOuterMost);
+
+ ExprResult EvaluateSlow(const AtomicConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
+ ExprResult Evaluate(const AtomicConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
+ ExprResult EvaluateSlow(const FoldExpandedConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
+ ExprResult Evaluate(const FoldExpandedConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
+ ExprResult EvaluateSlow(const ConceptIdConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL,
+ unsigned int Size);
+
+ ExprResult Evaluate(const ConceptIdConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
+ ExprResult Evaluate(const CompoundConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL);
+
+public:
+ ConstraintSatisfactionChecker(Sema &SemaRef, const NamedDecl *Template,
+ SourceLocation TemplateNameLoc,
+ UnsignedOrNone PackSubstitutionIndex,
+ ConstraintSatisfaction &Satisfaction)
+ : S(SemaRef), Template(Template), TemplateNameLoc(TemplateNameLoc),
+ PackSubstitutionIndex(PackSubstitutionIndex),
+ Satisfaction(Satisfaction) {}
+
+ ExprResult Evaluate(const NormalizedConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL);
+};
+
+StringRef allocateStringFromConceptDiagnostic(const Sema &S,
+ const PartialDiagnostic Diag) {
+ SmallString<128> DiagString;
+ DiagString = ": ";
+ Diag.EmitToString(S.getDiagnostics(), DiagString);
+ return S.getASTContext().backupStr(DiagString);
+}
+
+} // namespace
+
+ExprResult ConstraintSatisfactionChecker::EvaluateAtomicConstraint(
+ const Expr *AtomicExpr, const MultiLevelTemplateArgumentList &MLTAL) {
EnterExpressionEvaluationContext ConstantEvaluated(
S, Sema::ExpressionEvaluationContext::ConstantEvaluated,
Sema::ReuseLambdaContextDecl);
+ llvm::FoldingSetNodeID ID;
+ if (Template &&
+ DiagRecursiveConstraintEval(S, ID, Template, AtomicExpr, &MLTAL)) {
+ Satisfaction.IsSatisfied = false;
+ Satisfaction.ContainsErrors = true;
+ return ExprEmpty();
+ }
+ SatisfactionStackRAII StackRAII(S, Template, ID);
+
// Atomic constraint - substitute arguments and check satisfaction.
- ExprResult SubstitutedExpression;
+ ExprResult SubstitutedExpression = const_cast<Expr *>(AtomicExpr);
{
TemplateDeductionInfo Info(TemplateNameLoc);
Sema::InstantiatingTemplate Inst(
@@ -220,16 +485,6 @@ static ExprResult EvaluateAtomicConstraint(
if (Inst.isInvalid())
return ExprError();
- llvm::FoldingSetNodeID ID;
- if (Template &&
- DiagRecursiveConstraintEval(S, ID, Template, AtomicExpr, MLTAL)) {
- Satisfaction.IsSatisfied = false;
- Satisfaction.ContainsErrors = true;
- return ExprEmpty();
- }
-
- SatisfactionStackRAII StackRAII(S, Template, ID);
-
// We do not want error diagnostics escaping here.
Sema::SFINAETrap Trap(S);
SubstitutedExpression =
@@ -247,21 +502,16 @@ static ExprResult EvaluateAtomicConstraint(
PartialDiagnosticAt SubstDiag{SourceLocation(),
PartialDiagnostic::NullDiagnostic()};
Info.takeSFINAEDiagnostic(SubstDiag);
- // FIXME: Concepts: This is an unfortunate consequence of there
+ // FIXME: This is an unfortunate consequence of there
// being no serialization code for PartialDiagnostics and the fact
// that serializing them would likely take a lot more storage than
// just storing them as strings. We would still like, in the
// future, to serialize the proper PartialDiagnostic as serializing
// it as a string defeats the purpose of the diagnostic mechanism.
- SmallString<128> DiagString;
- DiagString = ": ";
- SubstDiag.second.EmitToString(S.getDiagnostics(), DiagString);
- unsigned MessageSize = DiagString.size();
- char *Mem = new (S.Context) char[MessageSize];
- memcpy(Mem, DiagString.c_str(), MessageSize);
Satisfaction.Details.emplace_back(
- new (S.Context) ConstraintSatisfaction::SubstitutionDiagnostic{
- SubstDiag.first, StringRef(Mem, MessageSize)});
+ new (S.Context) ConstraintSubstitutionDiagnostic{
+ SubstDiag.first,
+ allocateStringFromConceptDiagnostic(S, SubstDiag.second)});
Satisfaction.IsSatisfied = false;
return ExprEmpty();
}
@@ -289,216 +539,94 @@ static ExprResult EvaluateAtomicConstraint(
return SubstitutedExpression;
}
-static UnsignedOrNone EvaluateFoldExpandedConstraintSize(
- Sema &S, const CXXFoldExpr *FE, const NamedDecl *Template,
- SourceLocation TemplateNameLoc, const MultiLevelTemplateArgumentList &MLTAL,
- ConstraintSatisfaction &Satisfaction) {
-
- // We should ignore errors in the presence of packs of different size.
- Sema::SFINAETrap Trap(S);
-
- Expr *Pattern = FE->getPattern();
+std::optional<MultiLevelTemplateArgumentList>
+ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
+ const NormalizedConstraintWithParamMapping &Constraint,
+ MultiLevelTemplateArgumentList MLTAL,
+ llvm::SmallVector<TemplateArgument> &SubstitutedOuterMost) {
- SmallVector<UnexpandedParameterPack, 2> Unexpanded;
- S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
- assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
- bool Expand = true;
- bool RetainExpansion = false;
- UnsignedOrNone NumExpansions = FE->getNumExpansions();
- if (S.CheckParameterPacksForExpansion(
- FE->getEllipsisLoc(), Pattern->getSourceRange(), Unexpanded, MLTAL,
- /*FailOnPackProducingTemplates=*/true, Expand, RetainExpansion,
- NumExpansions) ||
- !Expand || RetainExpansion)
- return std::nullopt;
+ if (!Constraint.hasParameterMapping())
+ return std::move(MLTAL);
- if (NumExpansions && S.getLangOpts().BracketDepth < *NumExpansions) {
- S.Diag(FE->getEllipsisLoc(),
- clang::diag::err_fold_expression_limit_exceeded)
- << *NumExpansions << S.getLangOpts().BracketDepth
- << FE->getSourceRange();
- S.Diag(FE->getEllipsisLoc(), diag::note_bracket_depth);
+ TemplateDeductionInfo Info(Constraint.getBeginLoc());
+ Sema::InstantiatingTemplate Inst(
+ S, Constraint.getBeginLoc(),
+ Sema::InstantiatingTemplate::ConstraintSubstitution{},
+ // FIXME: improve const-correctness of InstantiatingTemplate
+ const_cast<NamedDecl *>(Template), Info, Constraint.getSourceRange());
+ if (Inst.isInvalid())
return std::nullopt;
- }
- return NumExpansions;
-}
-
-static ExprResult calculateConstraintSatisfaction(
- Sema &S, const Expr *ConstraintExpr, const NamedDecl *Template,
- SourceLocation TemplateNameLoc, const MultiLevelTemplateArgumentList &MLTAL,
- ConstraintSatisfaction &Satisfaction);
-
-static ExprResult calculateConstraintSatisfaction(
- Sema &S, const Expr *LHS, OverloadedOperatorKind Op, const Expr *RHS,
- const NamedDecl *Template, SourceLocation TemplateNameLoc,
- const MultiLevelTemplateArgumentList &MLTAL,
- ConstraintSatisfaction &Satisfaction) {
- size_t EffectiveDetailEndIndex = Satisfaction.Details.size();
-
- ExprResult LHSRes = calculateConstraintSatisfaction(
- S, LHS, Template, TemplateNameLoc, MLTAL, Satisfaction);
-
- if (LHSRes.isInvalid())
- return ExprError();
-
- bool IsLHSSatisfied = Satisfaction.IsSatisfied;
-
- if (Op == clang::OO_PipePipe && IsLHSSatisfied)
- // [temp.constr.op] p3
- // A disjunction is a constraint taking two operands. To determine if
- // a disjunction is satisfied, the satisfaction of the first operand
- // is checked. If that is satisfied, the disjunction is satisfied.
- // Otherwise, the disjunction is satisfied if and only if the second
- // operand is satisfied.
- // LHS is instantiated while RHS is not. Skip creating invalid BinaryOp.
- return LHSRes;
-
- if (Op == clang::OO_AmpAmp && !IsLHSSatisfied)
- // [temp.constr.op] p2
- // A conjunction is a constraint taking two operands. To determine if
- // a conjunction is satisfied, the satisfaction of the first operand
- // is checked. If that is not satisfied, the conjunction is not
- // satisfied. Otherwise, the conjunction is satisfied if and only if
- // the second operand is satisfied.
- // LHS is instantiated while RHS is not. Skip creating invalid BinaryOp.
- return LHSRes;
-
- ExprResult RHSRes = calculateConstraintSatisfaction(
- S, RHS, Template, TemplateNameLoc, MLTAL, Satisfaction);
- if (RHSRes.isInvalid())
- return ExprError();
- bool IsRHSSatisfied = Satisfaction.IsSatisfied;
- // Current implementation adds diagnostic information about the falsity
- // of each false atomic constraint expression when it evaluates them.
- // When the evaluation results to `false || true`, the information
- // generated during the evaluation of left-hand side is meaningless
- // because the whole expression evaluates to true.
- // The following code removes the irrelevant diagnostic information.
- // FIXME: We should probably delay the addition of diagnostic information
- // until we know the entire expression is false.
- if (Op == clang::OO_PipePipe && IsRHSSatisfied) {
- auto EffectiveDetailEnd = Satisfaction.Details.begin();
- std::advance(EffectiveDetailEnd, EffectiveDetailEndIndex);
- Satisfaction.Details.erase(EffectiveDetailEnd, Satisfaction.Details.end());
- }
-
- if (!LHSRes.isUsable() || !RHSRes.isUsable())
- return ExprEmpty();
-
- return BinaryOperator::Create(S.Context, LHSRes.get(), RHSRes.get(),
- BinaryOperator::getOverloadedOpcode(Op),
- S.Context.BoolTy, VK_PRValue, OK_Ordinary,
- LHS->getBeginLoc(), FPOptionsOverride{});
-}
-
-static ExprResult calculateConstraintSatisfaction(
- Sema &S, const CXXFoldExpr *FE, const NamedDecl *Template,
- SourceLocation TemplateNameLoc, const MultiLevelTemplateArgumentList &MLTAL,
- ConstraintSatisfaction &Satisfaction) {
- bool Conjunction = FE->getOperator() == BinaryOperatorKind::BO_LAnd;
- size_t EffectiveDetailEndIndex = Satisfaction.Details.size();
-
- ExprResult Out;
- if (FE->isLeftFold() && FE->getInit()) {
- Out = calculateConstraintSatisfaction(S, FE->getInit(), Template,
- TemplateNameLoc, MLTAL, Satisfaction);
- if (Out.isInvalid())
- return ExprError();
+ Sema::SFINAETrap Trap(S);
- // If the first clause of a conjunction is not satisfied,
- // or if the first clause of a disjection is satisfied,
- // we have established satisfaction of the whole constraint
- // and we should not continue further.
- if (Conjunction != Satisfaction.IsSatisfied)
- return Out;
- }
- UnsignedOrNone NumExpansions = EvaluateFoldExpandedConstraintSize(
- S, FE, Template, TemplateNameLoc, MLTAL, Satisfaction);
- if (!NumExpansions)
- return ExprError();
- for (unsigned I = 0; I < *NumExpansions; I++) {
- Sema::ArgPackSubstIndexRAII SubstIndex(S, I);
- ExprResult Res = calculateConstraintSatisfaction(
- S, FE->getPattern(), Template, TemplateNameLoc, MLTAL, Satisfaction);
- if (Res.isInvalid())
- return ExprError();
- bool IsRHSSatisfied = Satisfaction.IsSatisfied;
- if (!Conjunction && IsRHSSatisfied) {
- auto EffectiveDetailEnd = Satisfaction.Details.begin();
- std::advance(EffectiveDetailEnd, EffectiveDetailEndIndex);
- Satisfaction.Details.erase(EffectiveDetailEnd,
- Satisfaction.Details.end());
- }
- if (Out.isUnset())
- Out = Res;
- else if (!Res.isUnset()) {
- Out = BinaryOperator::Create(
- S.Context, Out.get(), Res.get(), FE->getOperator(), S.Context.BoolTy,
- VK_PRValue, OK_Ordinary, FE->getBeginLoc(), FPOptionsOverride{});
- }
- if (Conjunction != IsRHSSatisfied)
- return Out;
+ TemplateArgumentListInfo SubstArgs;
+ Sema::ArgPackSubstIndexRAII SubstIndex(
+ S, Constraint.getPackSubstitutionIndex()
+ ? Constraint.getPackSubstitutionIndex()
+ : PackSubstitutionIndex);
+
+ if (S.SubstTemplateArgumentsInParameterMapping(
+ Constraint.getParameterMapping(), Constraint.getBeginLoc(), MLTAL,
+ SubstArgs, /*BuildPackExpansionTypes=*/true)) {
+ Satisfaction.IsSatisfied = false;
+ return std::nullopt;
}
- if (FE->isRightFold() && FE->getInit()) {
- ExprResult Res = calculateConstraintSatisfaction(
- S, FE->getInit(), Template, TemplateNameLoc, MLTAL, Satisfaction);
- if (Out.isInvalid())
- return ExprError();
-
- if (Out.isUnset())
- Out = Res;
- else if (!Res.isUnset()) {
- Out = BinaryOperator::Create(
- S.Context, Out.get(), Res.get(), FE->getOperator(), S.Context.BoolTy,
- VK_PRValue, OK_Ordinary, FE->getBeginLoc(), FPOptionsOverride{});
+ Sema::CheckTemplateArgumentInfo CTAI;
+ auto *TD = const_cast<TemplateDecl *>(
+ cast<TemplateDecl>(Constraint.getConstraintDecl()));
+ if (S.CheckTemplateArgumentList(TD, Constraint.getUsedTemplateParamList(),
+ TD->getLocation(), SubstArgs,
+ /*DefaultArguments=*/{},
+ /*PartialTemplateArgs=*/false, CTAI))
+ return std::nullopt;
+ const NormalizedConstraint::OccurenceList &Used =
+ Constraint.mappingOccurenceList();
+ SubstitutedOuterMost =
+ llvm::to_vector_of<TemplateArgument>(MLTAL.getOutermost());
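+ // Splice the converted arguments back into the outermost level: each
+ // position flagged in the occurrence list receives its canonical converted
+ // argument, positions the mapping does not use are cleared, and Offset
+ // tracks one past the last slot written so the vector can be trimmed.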
+ unsigned Offset = 0;
+ for (unsigned I = 0, MappedIndex = 0; I < Used.size(); I++) {
+ TemplateArgument Arg;
+ if (Used[I])
+ Arg = S.Context.getCanonicalTemplateArgument(
+ CTAI.SugaredConverted[MappedIndex++]);
+ if (I < SubstitutedOuterMost.size()) {
+ SubstitutedOuterMost[I] = Arg;
+ Offset = I + 1;
+ } else {
+ SubstitutedOuterMost.push_back(Arg);
+ Offset = SubstitutedOuterMost.size();
}
}
+ if (Offset < SubstitutedOuterMost.size())
+ SubstitutedOuterMost.erase(SubstitutedOuterMost.begin() + Offset);
- if (Out.isUnset()) {
- Satisfaction.IsSatisfied = Conjunction;
- Out = S.BuildEmptyCXXFoldExpr(FE->getBeginLoc(), FE->getOperator());
- }
- return Out;
+ MLTAL.replaceOutermostTemplateArguments(
+ const_cast<NamedDecl *>(Constraint.getConstraintDecl()),
+ SubstitutedOuterMost);
+ return std::move(MLTAL);
}
-static ExprResult calculateConstraintSatisfaction(
- Sema &S, const Expr *ConstraintExpr, const NamedDecl *Template,
- SourceLocation TemplateNameLoc, const MultiLevelTemplateArgumentList &MLTAL,
- ConstraintSatisfaction &Satisfaction) {
- ConstraintExpr = ConstraintExpr->IgnoreParenImpCasts();
-
- if (LogicalBinOp BO = ConstraintExpr)
- return calculateConstraintSatisfaction(
- S, BO.getLHS(), BO.getOp(), BO.getRHS(), Template, TemplateNameLoc,
- MLTAL, Satisfaction);
+ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
+ const AtomicConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL) {
- if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpr)) {
- // These aren't evaluated, so we don't care about cleanups; we can just
- // evaluate them as if the cleanups didn't exist.
- return calculateConstraintSatisfaction(
- S, C->getSubExpr(), Template, TemplateNameLoc, MLTAL, Satisfaction);
- }
-
- if (auto *FE = dyn_cast<CXXFoldExpr>(ConstraintExpr);
- FE && S.getLangOpts().CPlusPlus26 &&
- (FE->getOperator() == BinaryOperatorKind::BO_LAnd ||
- FE->getOperator() == BinaryOperatorKind::BO_LOr)) {
- return calculateConstraintSatisfaction(S, FE, Template, TemplateNameLoc,
- MLTAL, Satisfaction);
+ llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
+ SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOuterMost);
+ if (!SubstitutedArgs) {
+ Satisfaction.IsSatisfied = false;
+ return ExprEmpty();
}
- // FIXME: We should not treat ConceptSpecializationExpr as an atomic constraint.
-
- // An atomic constraint expression
+ Sema::ArgPackSubstIndexRAII SubstIndex(S, PackSubstitutionIndex);
ExprResult SubstitutedAtomicExpr = EvaluateAtomicConstraint(
- S, ConstraintExpr, Template, TemplateNameLoc, MLTAL, Satisfaction);
+ Constraint.getConstraintExpr(), *SubstitutedArgs);
if (SubstitutedAtomicExpr.isInvalid())
return ExprError();
- if (!SubstitutedAtomicExpr.isUsable())
+ if (SubstitutedAtomicExpr.isUnset())
// Evaluator has decided satisfaction without yielding an expression.
return ExprEmpty();
@@ -512,16 +640,16 @@ static ExprResult calculateConstraintSatisfaction(
Satisfaction.ContainsErrors = true;
PartialDiagnostic Msg = S.PDiag(diag::note_constraint_references_error);
- SmallString<128> DiagString;
- DiagString = ": ";
- Msg.EmitToString(S.getDiagnostics(), DiagString);
- unsigned MessageSize = DiagString.size();
- char *Mem = new (S.Context) char[MessageSize];
- memcpy(Mem, DiagString.c_str(), MessageSize);
Satisfaction.Details.emplace_back(
- new (S.Context) ConstraintSatisfaction::SubstitutionDiagnostic{
+ new (S.Context) ConstraintSubstitutionDiagnostic{
SubstitutedAtomicExpr.get()->getBeginLoc(),
- StringRef(Mem, MessageSize)});
+ allocateStringFromConceptDiagnostic(S, Msg)});
+ return SubstitutedAtomicExpr;
+ }
+
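+ // The substituted expression may still be value-dependent, e.g. when
+ // constraints are checked against only partially substituted arguments;
+ // treat it as satisfied for now, without recording details.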
+ if (SubstitutedAtomicExpr.get()->isValueDependent()) {
+ Satisfaction.IsSatisfied = true;
+ Satisfaction.ContainsErrors = false;
return SubstitutedAtomicExpr;
}
@@ -552,21 +680,384 @@ static ExprResult calculateConstraintSatisfaction(
return SubstitutedAtomicExpr;
}
-static ExprResult calculateConstraintSatisfaction(
- Sema &S, const NamedDecl *Template, SourceLocation TemplateNameLoc,
- const MultiLevelTemplateArgumentList &MLTAL, const Expr *ConstraintExpr,
- ConstraintSatisfaction &Satisfaction) {
+ExprResult ConstraintSatisfactionChecker::Evaluate(
+ const AtomicConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+
+ unsigned Size = Satisfaction.Details.size();
+ llvm::FoldingSetNodeID ID;
+ UnsignedOrNone OuterPackSubstIndex =
+ Constraint.getPackSubstitutionIndex()
+ ? Constraint.getPackSubstitutionIndex()
+ : PackSubstitutionIndex;
+
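+ // Cache key: the constraint expression, the effective pack substitution
+ // index, and a hash of the (substituted) parameter mapping.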
+ ID.AddPointer(Constraint.getConstraintExpr());
+ ID.AddInteger(OuterPackSubstIndex.toInternalRepresentation());
+ HashParameterMapping(S, MLTAL, ID, OuterPackSubstIndex)
+ .VisitConstraint(Constraint);
+
+ if (auto Iter = S.UnsubstitutedConstraintSatisfactionCache.find(ID);
+ Iter != S.UnsubstitutedConstraintSatisfactionCache.end()) {
+
+ auto &Cached = Iter->second.Satisfaction;
+ Satisfaction.ContainsErrors = Cached.ContainsErrors;
+ Satisfaction.IsSatisfied = Cached.IsSatisfied;
+ Satisfaction.Details.insert(Satisfaction.Details.begin() + Size,
+ Cached.Details.begin(), Cached.Details.end());
+ return Iter->second.SubstExpr;
+ }
+
+ ExprResult E = EvaluateSlow(Constraint, MLTAL);
+
+ UnsubstitutedConstraintSatisfactionCacheResult Cache;
+ Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
+ Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
+ std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
+ std::back_inserter(Cache.Satisfaction.Details));
+ Cache.SubstExpr = E;
+ S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
+
+ return E;
+}
+
+UnsignedOrNone
+ConstraintSatisfactionChecker::EvaluateFoldExpandedConstraintSize(
+ const FoldExpandedConstraint &FE,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+
+ // We should ignore errors in the presence of packs of different sizes.
+ Sema::SFINAETrap Trap(S);
+
+ Expr *Pattern = const_cast<Expr *>(FE.getPattern());
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+ bool Expand = true;
+ bool RetainExpansion = false;
+ UnsignedOrNone NumExpansions(std::nullopt);
+ if (S.CheckParameterPacksForExpansion(
+ Pattern->getExprLoc(), Pattern->getSourceRange(), Unexpanded, MLTAL,
+ /*FailOnPackProducingTemplates=*/false, Expand, RetainExpansion,
+ NumExpansions) ||
+ !Expand || RetainExpansion)
+ return std::nullopt;
+
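+ // Refuse to materialize expansions beyond the configured bracket depth
+ // (-fbracket-depth); this mirrors the limit on ordinary fold expressions.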
+ if (NumExpansions && S.getLangOpts().BracketDepth < *NumExpansions) {
+ S.Diag(Pattern->getExprLoc(),
+ clang::diag::err_fold_expression_limit_exceeded)
+ << *NumExpansions << S.getLangOpts().BracketDepth
+ << Pattern->getSourceRange();
+ S.Diag(Pattern->getExprLoc(), diag::note_bracket_depth);
+ return std::nullopt;
+ }
+ return NumExpansions;
+}
- return calculateConstraintSatisfaction(S, ConstraintExpr, Template,
- TemplateNameLoc, MLTAL, Satisfaction);
+ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
+ const FoldExpandedConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+
+ bool Conjunction = Constraint.getFoldOperator() ==
+ FoldExpandedConstraint::FoldOperatorKind::And;
+ unsigned EffectiveDetailEndIndex = Satisfaction.Details.size();
+
+ llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ // FIXME: Is PackSubstitutionIndex correct?
+ llvm::SaveAndRestore _(PackSubstitutionIndex, S.ArgPackSubstIndex);
+ std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
+ SubstitutionInTemplateArguments(
+ static_cast<const NormalizedConstraintWithParamMapping &>(Constraint),
+ MLTAL, SubstitutedOuterMost);
+ if (!SubstitutedArgs) {
+ Satisfaction.IsSatisfied = false;
+ return ExprError();
+ }
+
+ ExprResult Out;
+ UnsignedOrNone NumExpansions =
+ EvaluateFoldExpandedConstraintSize(Constraint, *SubstitutedArgs);
+ if (!NumExpansions)
+ return ExprEmpty();
+
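+ // An empty expansion has no operands: a conjunctive fold is vacuously
+ // satisfied, a disjunctive one is not.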
+ if (*NumExpansions == 0) {
+ Satisfaction.IsSatisfied = Conjunction;
+ return ExprEmpty();
+ }
+
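+ // Evaluate the pattern once per expansion, folding the per-expansion
+ // results together and stopping as soon as the overall value is decided.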
+ for (unsigned I = 0; I < *NumExpansions; I++) {
+ Sema::ArgPackSubstIndexRAII SubstIndex(S, I);
+ Satisfaction.IsSatisfied = false;
+ Satisfaction.ContainsErrors = false;
+ ExprResult Expr =
+ ConstraintSatisfactionChecker(S, Template, TemplateNameLoc,
+ UnsignedOrNone(I), Satisfaction)
+ .Evaluate(Constraint.getNormalizedPattern(), *SubstitutedArgs);
+ if (Expr.isUsable()) {
+ if (Out.isUnset())
+ Out = Expr;
+ else
+ Out = BinaryOperator::Create(S.Context, Out.get(), Expr.get(),
+ Conjunction ? BinaryOperatorKind::BO_LAnd
+ : BinaryOperatorKind::BO_LOr,
+ S.Context.BoolTy, VK_PRValue, OK_Ordinary,
+ Constraint.getBeginLoc(),
+ FPOptionsOverride{});
+ } else {
+ assert(!Satisfaction.IsSatisfied);
+ }
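+ // A satisfied expansion decides a disjunction; failure details recorded
+ // for earlier expansions (e.g. C<int> failing before C<float> succeeds
+ // in (C<Ts> || ...)) are no longer relevant, so drop them.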
+ if (!Conjunction && Satisfaction.IsSatisfied) {
+ Satisfaction.Details.erase(Satisfaction.Details.begin() +
+ EffectiveDetailEndIndex,
+ Satisfaction.Details.end());
+ break;
+ }
+ if (Satisfaction.IsSatisfied != Conjunction)
+ return Out;
+ }
+
+ return Out;
+}
+
+ExprResult ConstraintSatisfactionChecker::Evaluate(
+ const FoldExpandedConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Constraint.getPattern());
+ HashParameterMapping(S, MLTAL, ID, std::nullopt).VisitConstraint(Constraint);
+
+ if (auto Iter = S.UnsubstitutedConstraintSatisfactionCache.find(ID);
+ Iter != S.UnsubstitutedConstraintSatisfactionCache.end()) {
+
+ auto &Cached = Iter->second.Satisfaction;
+ Satisfaction.ContainsErrors = Cached.ContainsErrors;
+ Satisfaction.IsSatisfied = Cached.IsSatisfied;
+ Satisfaction.Details.insert(Satisfaction.Details.end(),
+ Cached.Details.begin(), Cached.Details.end());
+ return Iter->second.SubstExpr;
+ }
+
+ unsigned Size = Satisfaction.Details.size();
+
+ ExprResult E = EvaluateSlow(Constraint, MLTAL);
+ UnsubstitutedConstraintSatisfactionCacheResult Cache;
+ Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
+ Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
+ std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
+ std::back_inserter(Cache.Satisfaction.Details));
+ Cache.SubstExpr = E;
+ S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
+ return E;
+}
+
+ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
+ const ConceptIdConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL, unsigned Size) {
+ const ConceptReference *ConceptId = Constraint.getConceptId();
+
+ llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
+ SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOuterMost);
+
+ if (!SubstitutedArgs) {
+ Satisfaction.IsSatisfied = false;
+ // FIXME: diagnostics?
+ return ExprError();
+ }
+
+ Sema::SFINAETrap Trap(S);
+ Sema::ArgPackSubstIndexRAII SubstIndex(
+ S, Constraint.getPackSubstitutionIndex()
+ ? Constraint.getPackSubstitutionIndex()
+ : PackSubstitutionIndex);
+
+ const ASTTemplateArgumentListInfo *Ori =
+ ConceptId->getTemplateArgsAsWritten();
+ TemplateDeductionInfo Info(TemplateNameLoc);
+ Sema::InstantiatingTemplate _(
+ S, TemplateNameLoc, Sema::InstantiatingTemplate::ConstraintSubstitution{},
+ const_cast<NamedDecl *>(Template), Info, Constraint.getSourceRange());
+
+ TemplateArgumentListInfo OutArgs(Ori->LAngleLoc, Ori->RAngleLoc);
+ if (S.SubstTemplateArguments(Ori->arguments(), *SubstitutedArgs, OutArgs) ||
+ Trap.hasErrorOccurred()) {
+ Satisfaction.IsSatisfied = false;
+ if (!Trap.hasErrorOccurred())
+ return ExprError();
+
+ PartialDiagnosticAt SubstDiag{SourceLocation(),
+ PartialDiagnostic::NullDiagnostic()};
+ Info.takeSFINAEDiagnostic(SubstDiag);
+ // FIXME: This is an unfortunate consequence of there being no
+ // serialization code for PartialDiagnostics, and of the fact that
+ // serializing them would likely take far more storage than strings.
+ // We would still like to serialize the proper PartialDiagnostic in
+ // the future, as flattening it to a string defeats the purpose of
+ // the diagnostic mechanism.
+ Satisfaction.Details.insert(
+ Satisfaction.Details.begin() + Size,
+ new (S.Context) ConstraintSubstitutionDiagnostic{
+ SubstDiag.first,
+ allocateStringFromConceptDiagnostic(S, SubstDiag.second)});
+ return ExprError();
+ }
+
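+ // Rebuild the concept-id with the substituted arguments. This result is
+ // only used for diagnostics, so satisfaction is not re-checked here.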
+ CXXScopeSpec SS;
+ SS.Adopt(ConceptId->getNestedNameSpecifierLoc());
+
+ ExprResult SubstitutedConceptId = S.CheckConceptTemplateId(
+ SS, ConceptId->getTemplateKWLoc(), ConceptId->getConceptNameInfo(),
+ ConceptId->getFoundDecl(), ConceptId->getNamedConcept(), &OutArgs,
+ /*DoCheckConstraintSatisfaction=*/false);
+
+ if (SubstitutedConceptId.isInvalid() || Trap.hasErrorOccurred())
+ return ExprError();
+
+ if (Size != Satisfaction.Details.size()) {
+ Satisfaction.Details.insert(
+ Satisfaction.Details.begin() + Size,
+ UnsatisfiedConstraintRecord(
+ SubstitutedConceptId.getAs<ConceptSpecializationExpr>()
+ ->getConceptReference()));
+ }
+ return SubstitutedConceptId;
+}
+
+ExprResult ConstraintSatisfactionChecker::Evaluate(
+ const ConceptIdConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+
+ const ConceptReference *ConceptId = Constraint.getConceptId();
+
+ UnsignedOrNone OuterPackSubstIndex =
+ Constraint.getPackSubstitutionIndex()
+ ? Constraint.getPackSubstitutionIndex()
+ : PackSubstitutionIndex;
+
+ Sema::InstantiatingTemplate _(S, ConceptId->getBeginLoc(),
+ Sema::InstantiatingTemplate::ConstraintsCheck{},
+ ConceptId->getNamedConcept(),
+ MLTAL.getInnermost(),
+ Constraint.getSourceRange());
+
+ unsigned Size = Satisfaction.Details.size();
+
+ ExprResult E = Evaluate(Constraint.getNormalizedConstraint(), MLTAL);
+
+ if (!E.isUsable()) {
+ Satisfaction.Details.insert(Satisfaction.Details.begin() + Size, ConceptId);
+ return E;
+ }
+
+ // ConceptIdConstraint is only relevant for diagnostics,
+ // so if the normalized constraint is satisfied, we should not
+ // substitute into the constraint.
+ if (Satisfaction.IsSatisfied)
+ return E;
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Constraint.getConceptId());
+ ID.AddInteger(OuterPackSubstIndex.toInternalRepresentation());
+ HashParameterMapping(S, MLTAL, ID, OuterPackSubstIndex)
+ .VisitConstraint(Constraint);
+
+ if (auto Iter = S.UnsubstitutedConstraintSatisfactionCache.find(ID);
+ Iter != S.UnsubstitutedConstraintSatisfactionCache.end()) {
+
+ auto &Cached = Iter->second.Satisfaction;
+ Satisfaction.ContainsErrors = Cached.ContainsErrors;
+ Satisfaction.IsSatisfied = Cached.IsSatisfied;
+ Satisfaction.Details.insert(Satisfaction.Details.begin() + Size,
+ Cached.Details.begin(), Cached.Details.end());
+ return Iter->second.SubstExpr;
+ }
+
+ ExprResult CE = EvaluateSlow(Constraint, MLTAL, Size);
+ if (CE.isInvalid())
+ return E;
+ UnsubstitutedConstraintSatisfactionCacheResult Cache;
+ Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
+ Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
+ std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
+ std::back_inserter(Cache.Satisfaction.Details));
+ Cache.SubstExpr = CE;
+ S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
+ return CE;
+}
+
+ExprResult ConstraintSatisfactionChecker::Evaluate(
+ const CompoundConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+
+ unsigned EffectiveDetailEndIndex = Satisfaction.Details.size();
+
+ bool Conjunction =
+ Constraint.getCompoundKind() == NormalizedConstraint::CCK_Conjunction;
+
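+ // Mirror the short-circuit semantics of && and ||: a failed conjunct or
+ // a satisfied disjunct decides the result without evaluating the RHS.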
+ ExprResult LHS = Evaluate(Constraint.getLHS(), MLTAL);
+
+ if (Conjunction && (!Satisfaction.IsSatisfied || Satisfaction.ContainsErrors))
+ return LHS;
+
+ if (!Conjunction && LHS.isUsable() && Satisfaction.IsSatisfied &&
+ !Satisfaction.ContainsErrors)
+ return LHS;
+
+ Satisfaction.ContainsErrors = false;
+ Satisfaction.IsSatisfied = false;
+
+ ExprResult RHS = Evaluate(Constraint.getRHS(), MLTAL);
+
+ if (RHS.isUsable() && Satisfaction.IsSatisfied &&
+ !Satisfaction.ContainsErrors)
+ Satisfaction.Details.erase(Satisfaction.Details.begin() +
+ EffectiveDetailEndIndex,
+ Satisfaction.Details.end());
+
+ if (!LHS.isUsable())
+ return RHS;
+
+ if (!RHS.isUsable())
+ return LHS;
+
+ return BinaryOperator::Create(S.Context, LHS.get(), RHS.get(),
+ Conjunction ? BinaryOperatorKind::BO_LAnd
+ : BinaryOperatorKind::BO_LOr,
+ S.Context.BoolTy, VK_PRValue, OK_Ordinary,
+ Constraint.getBeginLoc(), FPOptionsOverride{});
+}
+
+ExprResult ConstraintSatisfactionChecker::Evaluate(
+ const NormalizedConstraint &Constraint,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+ switch (Constraint.getKind()) {
+ case NormalizedConstraint::ConstraintKind::Atomic:
+ return Evaluate(static_cast<const AtomicConstraint &>(Constraint), MLTAL);
+
+ case NormalizedConstraint::ConstraintKind::FoldExpanded:
+ return Evaluate(static_cast<const FoldExpandedConstraint &>(Constraint),
+ MLTAL);
+
+ case NormalizedConstraint::ConstraintKind::ConceptId:
+ return Evaluate(static_cast<const ConceptIdConstraint &>(Constraint),
+ MLTAL);
+
+ case NormalizedConstraint::ConstraintKind::Compound:
+ return Evaluate(static_cast<const CompoundConstraint &>(Constraint), MLTAL);
+ }
}
static bool CheckConstraintSatisfaction(
Sema &S, const NamedDecl *Template,
ArrayRef<AssociatedConstraint> AssociatedConstraints,
- llvm::SmallVectorImpl<Expr *> &Converted,
const MultiLevelTemplateArgumentList &TemplateArgsLists,
- SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction) {
+ SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction,
+ Expr **ConvertedExpr, const ConceptReference *TopLevelConceptId = nullptr) {
+
+ if (ConvertedExpr)
+ *ConvertedExpr = nullptr;
+
if (AssociatedConstraints.empty()) {
Satisfaction.IsSatisfied = true;
return false;
@@ -578,57 +1069,60 @@ static bool CheckConstraintSatisfaction(
return false;
}
- ArrayRef<TemplateArgument> TemplateArgs =
- TemplateArgsLists.getNumSubstitutedLevels() > 0
- ? TemplateArgsLists.getOutermost()
- : ArrayRef<TemplateArgument>{};
- Sema::InstantiatingTemplate Inst(S, TemplateIDRange.getBegin(),
- Sema::InstantiatingTemplate::ConstraintsCheck{},
- const_cast<NamedDecl *>(Template), TemplateArgs, TemplateIDRange);
- if (Inst.isInvalid())
+ llvm::ArrayRef<TemplateArgument> Args;
+ if (TemplateArgsLists.getNumLevels() != 0)
+ Args = TemplateArgsLists.getInnermost();
+
+ std::optional<Sema::InstantiatingTemplate> SynthesisContext;
+ if (!TopLevelConceptId) {
+ SynthesisContext.emplace(S, TemplateIDRange.getBegin(),
+ Sema::InstantiatingTemplate::ConstraintsCheck{},
+ const_cast<NamedDecl *>(Template), Args,
+ TemplateIDRange);
+ }
+
+ const NormalizedConstraint *C =
+ S.getNormalizedAssociatedConstraints(Template, AssociatedConstraints);
+ if (!C) {
+ Satisfaction.IsSatisfied = false;
return true;
+ }
- for (const AssociatedConstraint &AC : AssociatedConstraints) {
- if (AC.isNull())
- return true;
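+ // When checking a concept-id, wrap the normalized constraint in a
+ // ConceptIdConstraint so that diagnostics can refer back to the written
+ // concept reference.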
+ if (TopLevelConceptId)
+ C = ConceptIdConstraint::Create(S.getASTContext(), TopLevelConceptId,
+ const_cast<NormalizedConstraint *>(C),
+ Template, /*CSE=*/nullptr,
+ S.ArgPackSubstIndex);
- Sema::ArgPackSubstIndexRAII _(S, AC.ArgPackSubstIndex);
- ExprResult Res = calculateConstraintSatisfaction(
- S, Template, TemplateIDRange.getBegin(), TemplateArgsLists,
- AC.ConstraintExpr, Satisfaction);
- if (Res.isInvalid())
- return true;
+ ExprResult Res =
+ ConstraintSatisfactionChecker(S, Template, TemplateIDRange.getBegin(),
+ S.ArgPackSubstIndex, Satisfaction)
+ .Evaluate(*C, TemplateArgsLists);
+
+ if (Res.isInvalid())
+ return true;
+
+ if (Res.isUsable() && ConvertedExpr)
+ *ConvertedExpr = Res.get();
- Converted.push_back(Res.get());
- if (!Satisfaction.IsSatisfied) {
- // Backfill the 'converted' list with nulls so we can keep the Converted
- // and unconverted lists in sync.
- Converted.append(AssociatedConstraints.size() - Converted.size(),
- nullptr);
- // [temp.constr.op] p2
- // [...] To determine if a conjunction is satisfied, the satisfaction
- // of the first operand is checked. If that is not satisfied, the
- // conjunction is not satisfied. [...]
- return false;
- }
- }
return false;
}
bool Sema::CheckConstraintSatisfaction(
- const NamedDecl *Template,
+ ConstrainedDeclOrNestedRequirement Entity,
ArrayRef<AssociatedConstraint> AssociatedConstraints,
- llvm::SmallVectorImpl<Expr *> &ConvertedConstraints,
const MultiLevelTemplateArgumentList &TemplateArgsLists,
- SourceRange TemplateIDRange, ConstraintSatisfaction &OutSatisfaction) {
+ SourceRange TemplateIDRange, ConstraintSatisfaction &OutSatisfaction,
+ const ConceptReference *TopLevelConceptId, Expr **ConvertedExpr) {
if (AssociatedConstraints.empty()) {
OutSatisfaction.IsSatisfied = true;
return false;
}
+ const auto *Template = Entity.dyn_cast<const NamedDecl *>();
if (!Template) {
return ::CheckConstraintSatisfaction(
- *this, nullptr, AssociatedConstraints, ConvertedConstraints,
- TemplateArgsLists, TemplateIDRange, OutSatisfaction);
+ *this, nullptr, AssociatedConstraints, TemplateArgsLists,
+ TemplateIDRange, OutSatisfaction, ConvertedExpr, TopLevelConceptId);
}
// Invalid templates could make their way here. Substituting them could result
// in dependent expressions.
@@ -643,10 +1137,15 @@ bool Sema::CheckConstraintSatisfaction(
// here.
llvm::SmallVector<TemplateArgument, 4> FlattenedArgs;
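+ // Canonicalize the arguments so that semantically identical argument
+ // lists map to the same satisfaction cache entry.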
for (auto List : TemplateArgsLists)
- llvm::append_range(FlattenedArgs, List.Args);
+ for (const TemplateArgument &Arg : List.Args)
+ FlattenedArgs.emplace_back(Context.getCanonicalTemplateArgument(Arg));
+
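+ // For a top-level concept-id, key the satisfaction cache on the named
+ // concept so every use of the same specialization shares one entry.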
+ const NamedDecl *Owner = Template;
+ if (TopLevelConceptId)
+ Owner = TopLevelConceptId->getNamedConcept();
llvm::FoldingSetNodeID ID;
- ConstraintSatisfaction::Profile(ID, Context, Template, FlattenedArgs);
+ ConstraintSatisfaction::Profile(ID, Context, Owner, FlattenedArgs);
void *InsertPos;
if (auto *Cached = SatisfactionCache.FindNodeOrInsertPos(ID, InsertPos)) {
OutSatisfaction = *Cached;
@@ -654,11 +1153,11 @@ bool Sema::CheckConstraintSatisfaction(
}
auto Satisfaction =
- std::make_unique<ConstraintSatisfaction>(Template, FlattenedArgs);
- if (::CheckConstraintSatisfaction(*this, Template, AssociatedConstraints,
- ConvertedConstraints, TemplateArgsLists,
- TemplateIDRange, *Satisfaction)) {
- OutSatisfaction = *Satisfaction;
+ std::make_unique<ConstraintSatisfaction>(Owner, FlattenedArgs);
+ if (::CheckConstraintSatisfaction(
+ *this, Template, AssociatedConstraints, TemplateArgsLists,
+ TemplateIDRange, *Satisfaction, ConvertedExpr, TopLevelConceptId)) {
+ OutSatisfaction = std::move(*Satisfaction);
return true;
}
@@ -688,14 +1187,18 @@ bool Sema::CheckConstraintSatisfaction(
const ConceptSpecializationExpr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
+ llvm::SmallVector<AssociatedConstraint, 1> Constraints;
+ Constraints.emplace_back(
+ ConstraintExpr->getNamedConcept()->getConstraintExpr());
+
MultiLevelTemplateArgumentList MLTAL(ConstraintExpr->getNamedConcept(),
ConstraintExpr->getTemplateArguments(),
true);
- return calculateConstraintSatisfaction(
- *this, ConstraintExpr, ConstraintExpr->getNamedConcept(),
- ConstraintExpr->getConceptNameLoc(), MLTAL, Satisfaction)
- .isInvalid();
+ return CheckConstraintSatisfaction(
+ ConstraintExpr->getNamedConcept(), Constraints, MLTAL,
+ ConstraintExpr->getSourceRange(), Satisfaction,
+ ConstraintExpr->getConceptReference());
}
bool Sema::SetupConstraintScope(
@@ -854,50 +1357,6 @@ bool Sema::CheckFunctionConstraints(const FunctionDecl *FD,
Satisfaction);
}
-
-// Figure out the depth to the translation unit for this function declaration,
-// for the purpose of seeing whether two declarations differ by constraints.
-// This isn't the same as getTemplateDepth, because it includes already
-// instantiated parents.
-static unsigned
-CalculateTemplateDepthForConstraints(Sema &S, const NamedDecl *ND,
- bool SkipForSpecialization = false) {
- MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
- ND, ND->getLexicalDeclContext(), /*Final=*/false,
- /*Innermost=*/std::nullopt,
- /*RelativeToPrimary=*/true,
- /*Pattern=*/nullptr,
- /*ForConstraintInstantiation=*/true, SkipForSpecialization);
- return MLTAL.getNumLevels();
-}
-
-namespace {
- class AdjustConstraintDepth : public TreeTransform<AdjustConstraintDepth> {
- unsigned TemplateDepth = 0;
- public:
- using inherited = TreeTransform<AdjustConstraintDepth>;
- AdjustConstraintDepth(Sema &SemaRef, unsigned TemplateDepth)
- : inherited(SemaRef), TemplateDepth(TemplateDepth) {}
-
- using inherited::TransformTemplateTypeParmType;
- QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
- TemplateTypeParmTypeLoc TL, bool) {
- const TemplateTypeParmType *T = TL.getTypePtr();
-
- TemplateTypeParmDecl *NewTTPDecl = nullptr;
- if (TemplateTypeParmDecl *OldTTPDecl = T->getDecl())
- NewTTPDecl = cast_or_null<TemplateTypeParmDecl>(
- TransformDecl(TL.getNameLoc(), OldTTPDecl));
-
- QualType Result = getSema().Context.getTemplateTypeParmType(
- T->getDepth() + TemplateDepth, T->getIndex(), T->isParameterPack(),
- NewTTPDecl);
- TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
- return Result;
- }
- };
-} // namespace
-
static const Expr *SubstituteConstraintExpressionWithoutSatisfaction(
Sema &S, const Sema::TemplateCompareNewDeclInfo &DeclInfo,
const Expr *ConstrExpr) {
@@ -1161,73 +1620,61 @@ bool Sema::CheckFunctionTemplateConstraints(
static void diagnoseUnsatisfiedRequirement(Sema &S,
concepts::ExprRequirement *Req,
bool First) {
- assert(!Req->isSatisfied()
- && "Diagnose() can only be used on an unsatisfied requirement");
+ assert(!Req->isSatisfied() &&
+ "Diagnose() can only be used on an unsatisfied requirement");
switch (Req->getSatisfactionStatus()) {
- case concepts::ExprRequirement::SS_Dependent:
- llvm_unreachable("Diagnosing a dependent requirement");
- break;
- case concepts::ExprRequirement::SS_ExprSubstitutionFailure: {
- auto *SubstDiag = Req->getExprSubstitutionDiagnostic();
- if (!SubstDiag->DiagMessage.empty())
- S.Diag(SubstDiag->DiagLoc,
- diag::note_expr_requirement_expr_substitution_error)
- << (int)First << SubstDiag->SubstitutedEntity
- << SubstDiag->DiagMessage;
- else
- S.Diag(SubstDiag->DiagLoc,
- diag::note_expr_requirement_expr_unknown_substitution_error)
- << (int)First << SubstDiag->SubstitutedEntity;
- break;
- }
- case concepts::ExprRequirement::SS_NoexceptNotMet:
- S.Diag(Req->getNoexceptLoc(),
- diag::note_expr_requirement_noexcept_not_met)
- << (int)First << Req->getExpr();
- break;
- case concepts::ExprRequirement::SS_TypeRequirementSubstitutionFailure: {
- auto *SubstDiag =
- Req->getReturnTypeRequirement().getSubstitutionDiagnostic();
- if (!SubstDiag->DiagMessage.empty())
- S.Diag(SubstDiag->DiagLoc,
- diag::note_expr_requirement_type_requirement_substitution_error)
- << (int)First << SubstDiag->SubstitutedEntity
- << SubstDiag->DiagMessage;
- else
- S.Diag(SubstDiag->DiagLoc,
- diag::note_expr_requirement_type_requirement_unknown_substitution_error)
- << (int)First << SubstDiag->SubstitutedEntity;
- break;
- }
- case concepts::ExprRequirement::SS_ConstraintsNotSatisfied: {
- ConceptSpecializationExpr *ConstraintExpr =
- Req->getReturnTypeRequirementSubstitutedConstraintExpr();
- if (ConstraintExpr->getTemplateArgsAsWritten()->NumTemplateArgs == 1) {
- // A simple case - expr type is the type being constrained and the concept
- // was not provided arguments.
- Expr *e = Req->getExpr();
- S.Diag(e->getBeginLoc(),
- diag::note_expr_requirement_constraints_not_satisfied_simple)
- << (int)First << S.Context.getReferenceQualifiedType(e)
- << ConstraintExpr->getNamedConcept();
- } else {
- S.Diag(ConstraintExpr->getBeginLoc(),
- diag::note_expr_requirement_constraints_not_satisfied)
- << (int)First << ConstraintExpr;
- }
- S.DiagnoseUnsatisfiedConstraint(ConstraintExpr->getSatisfaction());
- break;
- }
- case concepts::ExprRequirement::SS_Satisfied:
- llvm_unreachable("We checked this above");
+ case concepts::ExprRequirement::SS_Dependent:
+ llvm_unreachable("Diagnosing a dependent requirement");
+ break;
+ case concepts::ExprRequirement::SS_ExprSubstitutionFailure: {
+ auto *SubstDiag = Req->getExprSubstitutionDiagnostic();
+ if (!SubstDiag->DiagMessage.empty())
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_expr_requirement_expr_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity
+ << SubstDiag->DiagMessage;
+ else
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_expr_requirement_expr_unknown_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity;
+ break;
+ }
+ case concepts::ExprRequirement::SS_NoexceptNotMet:
+ S.Diag(Req->getNoexceptLoc(), diag::note_expr_requirement_noexcept_not_met)
+ << (int)First << Req->getExpr();
+ break;
+ case concepts::ExprRequirement::SS_TypeRequirementSubstitutionFailure: {
+ auto *SubstDiag =
+ Req->getReturnTypeRequirement().getSubstitutionDiagnostic();
+ if (!SubstDiag->DiagMessage.empty())
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_expr_requirement_type_requirement_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity
+ << SubstDiag->DiagMessage;
+ else
+ S.Diag(
+ SubstDiag->DiagLoc,
+ diag::
+ note_expr_requirement_type_requirement_unknown_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity;
+ break;
+ }
+ case concepts::ExprRequirement::SS_ConstraintsNotSatisfied: {
+ ConceptSpecializationExpr *ConstraintExpr =
+ Req->getReturnTypeRequirementSubstitutedConstraintExpr();
+ S.DiagnoseUnsatisfiedConstraint(ConstraintExpr);
+ break;
+ }
+ case concepts::ExprRequirement::SS_Satisfied:
+ llvm_unreachable("We checked this above");
}
}
static void diagnoseUnsatisfiedRequirement(Sema &S,
concepts::TypeRequirement *Req,
bool First) {
- assert(!Req->isSatisfied()
- && "Diagnose() can only be used on an unsatisfied requirement");
+ assert(!Req->isSatisfied() &&
+ "Diagnose() can only be used on an unsatisfied requirement");
switch (Req->getSatisfactionStatus()) {
case concepts::TypeRequirement::SS_Dependent:
llvm_unreachable("Diagnosing a dependent requirement");
@@ -1235,9 +1682,9 @@ static void diagnoseUnsatisfiedRequirement(Sema &S,
case concepts::TypeRequirement::SS_SubstitutionFailure: {
auto *SubstDiag = Req->getSubstitutionDiagnostic();
if (!SubstDiag->DiagMessage.empty())
- S.Diag(SubstDiag->DiagLoc,
- diag::note_type_requirement_substitution_error) << (int)First
- << SubstDiag->SubstitutedEntity << SubstDiag->DiagMessage;
+ S.Diag(SubstDiag->DiagLoc, diag::note_type_requirement_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity
+ << SubstDiag->DiagMessage;
else
S.Diag(SubstDiag->DiagLoc,
diag::note_type_requirement_unknown_substitution_error)
@@ -1249,31 +1696,53 @@ static void diagnoseUnsatisfiedRequirement(Sema &S,
return;
}
}
-static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
- Expr *SubstExpr,
- bool First = true);
+
+static void diagnoseUnsatisfiedConceptIdExpr(Sema &S,
+ const ConceptReference *Concept,
+ SourceLocation Loc, bool First) {
+ if (Concept->getTemplateArgsAsWritten()->NumTemplateArgs == 1) {
+ S.Diag(
+ Loc,
+ diag::
+ note_single_arg_concept_specialization_constraint_evaluated_to_false)
+ << (int)First
+ << Concept->getTemplateArgsAsWritten()->arguments()[0].getArgument()
+ << Concept->getNamedConcept();
+ } else {
+ S.Diag(Loc, diag::note_concept_specialization_constraint_evaluated_to_false)
+ << (int)First << Concept;
+ }
+}
+
+static void diagnoseUnsatisfiedConstraintExpr(
+ Sema &S, const UnsatisfiedConstraintRecord &Record, SourceLocation Loc,
+ bool First, concepts::NestedRequirement *Req = nullptr);
+
+static void DiagnoseUnsatisfiedConstraint(
+ Sema &S, ArrayRef<UnsatisfiedConstraintRecord> Records, SourceLocation Loc,
+ bool First = true, concepts::NestedRequirement *Req = nullptr) {
+ for (auto &Record : Records) {
+ diagnoseUnsatisfiedConstraintExpr(S, Record, Loc, First, Req);
+ Loc = {};
+ First = isa<const ConceptReference *>(Record);
+ }
+}
static void diagnoseUnsatisfiedRequirement(Sema &S,
concepts::NestedRequirement *Req,
bool First) {
- using SubstitutionDiagnostic = std::pair<SourceLocation, StringRef>;
- for (auto &Record : Req->getConstraintSatisfaction()) {
- if (auto *SubstDiag = Record.dyn_cast<SubstitutionDiagnostic *>())
- S.Diag(SubstDiag->first, diag::note_nested_requirement_substitution_error)
- << (int)First << Req->getInvalidConstraintEntity()
- << SubstDiag->second;
- else
- diagnoseWellFormedUnsatisfiedConstraintExpr(S, Record.dyn_cast<Expr *>(),
- First);
- First = false;
- }
+ DiagnoseUnsatisfiedConstraint(S, Req->getConstraintSatisfaction().records(),
+ Req->hasInvalidConstraint()
+ ? SourceLocation()
+ : Req->getConstraintExpr()->getExprLoc(),
+ First, Req);
}
static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
- Expr *SubstExpr,
+ const Expr *SubstExpr,
bool First) {
SubstExpr = SubstExpr->IgnoreParenImpCasts();
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SubstExpr)) {
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(SubstExpr)) {
switch (BO->getOpcode()) {
// These two cases will in practice only be reached when using fold
// expressions with || and &&, since otherwise the || and && will have been
@@ -1319,7 +1788,7 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
BO->getRHS()->EvaluateAsInt(SimplifiedRHS, S.Context,
Expr::SE_NoSideEffects,
/*InConstantContext=*/true);
- if (!SimplifiedLHS.Diag && ! SimplifiedRHS.Diag) {
+ if (!SimplifiedLHS.Diag && !SimplifiedRHS.Diag) {
S.Diag(SubstExpr->getBeginLoc(),
diag::note_atomic_constraint_evaluated_to_false_elaborated)
<< (int)First << SubstExpr
@@ -1334,22 +1803,6 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
default:
break;
}
- } else if (auto *CSE = dyn_cast<ConceptSpecializationExpr>(SubstExpr)) {
- if (CSE->getTemplateArgsAsWritten()->NumTemplateArgs == 1) {
- S.Diag(
- CSE->getSourceRange().getBegin(),
- diag::
- note_single_arg_concept_specialization_constraint_evaluated_to_false)
- << (int)First
- << CSE->getTemplateArgsAsWritten()->arguments()[0].getArgument()
- << CSE->getNamedConcept();
- } else {
- S.Diag(SubstExpr->getSourceRange().getBegin(),
- diag::note_concept_specialization_constraint_evaluated_to_false)
- << (int)First << CSE;
- }
- S.DiagnoseUnsatisfiedConstraint(CSE->getSatisfaction());
- return;
} else if (auto *RE = dyn_cast<RequiresExpr>(SubstExpr)) {
// FIXME: RequiresExpr should store dependent diagnostics.
for (concepts::Requirement *Req : RE->getRequirements())
@@ -1364,6 +1817,10 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
break;
}
return;
+ } else if (auto *CSE = dyn_cast<ConceptSpecializationExpr>(SubstExpr)) {
+ // Drill down into concept ids treated as atomic constraints.
+ S.DiagnoseUnsatisfiedConstraint(CSE, First);
+ return;
} else if (auto *TTE = dyn_cast<TypeTraitExpr>(SubstExpr);
TTE && TTE->getTrait() == clang::TypeTrait::BTT_IsDeducible) {
assert(TTE->getNumArgs() == 2);
@@ -1379,216 +1836,332 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
S.DiagnoseTypeTraitDetails(SubstExpr);
}
-template <typename SubstitutionDiagnostic>
static void diagnoseUnsatisfiedConstraintExpr(
- Sema &S, const llvm::PointerUnion<Expr *, SubstitutionDiagnostic *> &Record,
- bool First = true) {
- if (auto *Diag = Record.template dyn_cast<SubstitutionDiagnostic *>()) {
- S.Diag(Diag->first, diag::note_substituted_constraint_expr_is_ill_formed)
- << Diag->second;
+ Sema &S, const UnsatisfiedConstraintRecord &Record, SourceLocation Loc,
+ bool First, concepts::NestedRequirement *Req) {
+ if (auto *Diag =
+ Record
+ .template dyn_cast<const ConstraintSubstitutionDiagnostic *>()) {
+ if (Req)
+ S.Diag(Diag->first, diag::note_nested_requirement_substitution_error)
+ << (int)First << Req->getInvalidConstraintEntity() << Diag->second;
+ else
+ S.Diag(Diag->first, diag::note_substituted_constraint_expr_is_ill_formed)
+ << Diag->second;
return;
}
-
- diagnoseWellFormedUnsatisfiedConstraintExpr(S, cast<Expr *>(Record), First);
+ if (const auto *Concept = dyn_cast<const ConceptReference *>(Record)) {
+ if (Loc.isInvalid())
+ Loc = Concept->getBeginLoc();
+ diagnoseUnsatisfiedConceptIdExpr(S, Concept, Loc, First);
+ return;
+ }
+ diagnoseWellFormedUnsatisfiedConstraintExpr(
+ S, cast<const class Expr *>(Record), First);
}
-void
-Sema::DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction,
- bool First) {
+void Sema::DiagnoseUnsatisfiedConstraint(
+ const ConstraintSatisfaction &Satisfaction, SourceLocation Loc,
+ bool First) {
+
assert(!Satisfaction.IsSatisfied &&
"Attempted to diagnose a satisfied constraint");
- for (auto &Record : Satisfaction.Details) {
- diagnoseUnsatisfiedConstraintExpr(*this, Record, First);
- First = false;
- }
+ ::DiagnoseUnsatisfiedConstraint(*this, Satisfaction.Details, Loc, First);
}
void Sema::DiagnoseUnsatisfiedConstraint(
- const ASTConstraintSatisfaction &Satisfaction,
- bool First) {
+ const ConceptSpecializationExpr *ConstraintExpr, bool First) {
+
+ const ASTConstraintSatisfaction &Satisfaction =
+ ConstraintExpr->getSatisfaction();
+
assert(!Satisfaction.IsSatisfied &&
"Attempted to diagnose a satisfied constraint");
- for (auto &Record : Satisfaction) {
- diagnoseUnsatisfiedConstraintExpr(*this, Record, First);
- First = false;
- }
+
+ ::DiagnoseUnsatisfiedConstraint(*this, Satisfaction.records(),
+ ConstraintExpr->getBeginLoc(), First);
}
-const NormalizedConstraint *Sema::getNormalizedAssociatedConstraints(
- const NamedDecl *ConstrainedDecl,
- ArrayRef<AssociatedConstraint> AssociatedConstraints) {
- // In case the ConstrainedDecl comes from modules, it is necessary to use
- // the canonical decl to avoid different atomic constraints with the 'same'
- // declarations.
- ConstrainedDecl = cast<NamedDecl>(ConstrainedDecl->getCanonicalDecl());
+namespace {
- auto CacheEntry = NormalizationCache.find(ConstrainedDecl);
- if (CacheEntry == NormalizationCache.end()) {
- auto Normalized = NormalizedConstraint::fromAssociatedConstraints(
- *this, ConstrainedDecl, AssociatedConstraints);
- CacheEntry =
- NormalizationCache
- .try_emplace(ConstrainedDecl,
- Normalized
- ? new (Context) NormalizedConstraint(
- std::move(*Normalized))
- : nullptr)
- .first;
- }
- return CacheEntry->second;
-}
+class SubstituteParameterMappings {
+ Sema &SemaRef;
-const NormalizedConstraint *clang::getNormalizedAssociatedConstraints(
- Sema &S, const NamedDecl *ConstrainedDecl,
- ArrayRef<AssociatedConstraint> AssociatedConstraints) {
- return S.getNormalizedAssociatedConstraints(ConstrainedDecl,
- AssociatedConstraints);
-}
+ const MultiLevelTemplateArgumentList *MLTAL;
+ const ASTTemplateArgumentListInfo *ArgsAsWritten;
-static bool
-substituteParameterMappings(Sema &S, NormalizedConstraint &N,
- ConceptDecl *Concept,
- const MultiLevelTemplateArgumentList &MLTAL,
- const ASTTemplateArgumentListInfo *ArgsAsWritten) {
+ bool InFoldExpr;
- if (N.isCompound()) {
- if (substituteParameterMappings(S, N.getLHS(), Concept, MLTAL,
- ArgsAsWritten))
- return true;
- return substituteParameterMappings(S, N.getRHS(), Concept, MLTAL,
- ArgsAsWritten);
- }
+ SubstituteParameterMappings(Sema &SemaRef,
+ const MultiLevelTemplateArgumentList *MLTAL,
+ const ASTTemplateArgumentListInfo *ArgsAsWritten,
+ bool InFoldExpr)
+ : SemaRef(SemaRef), MLTAL(MLTAL), ArgsAsWritten(ArgsAsWritten),
+ InFoldExpr(InFoldExpr) {}
+
+ void buildParameterMapping(NormalizedConstraintWithParamMapping &N);
+
+ bool substitute(NormalizedConstraintWithParamMapping &N);
+
+ bool substitute(ConceptIdConstraint &CC);
+
+public:
+ SubstituteParameterMappings(Sema &SemaRef, bool InFoldExpr = false)
+ : SemaRef(SemaRef), MLTAL(nullptr), ArgsAsWritten(nullptr),
+ InFoldExpr(InFoldExpr) {}
+
+ bool substitute(NormalizedConstraint &N);
+};
- if (N.isFoldExpanded()) {
- Sema::ArgPackSubstIndexRAII _(S, std::nullopt);
- return substituteParameterMappings(
- S, N.getFoldExpandedConstraint()->Constraint, Concept, MLTAL,
- ArgsAsWritten);
+void SubstituteParameterMappings::buildParameterMapping(
+ NormalizedConstraintWithParamMapping &N) {
+ TemplateParameterList *TemplateParams =
+ cast<TemplateDecl>(N.getConstraintDecl())->getTemplateParameters();
+
+ llvm::SmallBitVector OccurringIndices(TemplateParams->size());
+ llvm::SmallBitVector OccurringIndicesForSubsumption(TemplateParams->size());
+
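+ // Record which parameters of the declaring template occur in this
+ // constraint; only those participate in the parameter mapping.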
+ if (N.getKind() == NormalizedConstraint::ConstraintKind::Atomic) {
+ SemaRef.MarkUsedTemplateParameters(
+ static_cast<AtomicConstraint &>(N).getConstraintExpr(),
+ /*OnlyDeduced=*/false,
+ /*Depth=*/0, OccurringIndices);
+
+ SemaRef.MarkUsedTemplateParametersForSubsumptionParameterMapping(
+ static_cast<AtomicConstraint &>(N).getConstraintExpr(),
+ /*Depth=*/0, OccurringIndicesForSubsumption);
+
+ } else if (N.getKind() ==
+ NormalizedConstraint::ConstraintKind::FoldExpanded) {
+ SemaRef.MarkUsedTemplateParameters(
+ static_cast<FoldExpandedConstraint &>(N).getPattern(),
+ /*OnlyDeduced=*/false,
+ /*Depth=*/0, OccurringIndices);
+ } else if (N.getKind() == NormalizedConstraint::ConstraintKind::ConceptId) {
+ auto *Args = static_cast<ConceptIdConstraint &>(N)
+ .getConceptId()
+ ->getTemplateArgsAsWritten();
+ if (Args)
+ SemaRef.MarkUsedTemplateParameters(Args->arguments(),
+ /*Depth=*/0, OccurringIndices);
}
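+ // Build an identity mapping (each used parameter mapped to itself),
+ // preserving the location of the written argument when one exists.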
+ TemplateArgumentLoc *TempArgs =
+ new (SemaRef.Context) TemplateArgumentLoc[OccurringIndices.count()];
+ llvm::SmallVector<NamedDecl *> UsedParams;
+ for (unsigned I = 0, J = 0, C = TemplateParams->size(); I != C; ++I) {
+ SourceLocation Loc = ArgsAsWritten->NumTemplateArgs > I
+ ? ArgsAsWritten->arguments()[I].getLocation()
+ : SourceLocation();
+ // FIXME: Investigate why we can't always preserve the SourceLoc; until
+ // then we cannot assert Loc.isValid() here.
+ if (OccurringIndices[I]) {
+ NamedDecl *Param = TemplateParams->begin()[I];
+ new (&(TempArgs)[J]) TemplateArgumentLoc(
+ SemaRef.getIdentityTemplateArgumentLoc(Param, Loc));
+ UsedParams.push_back(Param);
+ J++;
+ }
+ }
+ auto *UsedList = TemplateParameterList::Create(
+ SemaRef.Context, TemplateParams->getTemplateLoc(),
+ TemplateParams->getLAngleLoc(), UsedParams,
+ /*RAngleLoc=*/SourceLocation(),
+ /*RequiresClause=*/nullptr);
+ unsigned Size = OccurringIndices.count();
+ N.updateParameterMapping(
+ std::move(OccurringIndices), std::move(OccurringIndicesForSubsumption),
+ MutableArrayRef<TemplateArgumentLoc>{TempArgs, Size}, UsedList);
+}
- TemplateParameterList *TemplateParams = Concept->getTemplateParameters();
+bool SubstituteParameterMappings::substitute(
+ NormalizedConstraintWithParamMapping &N) {
+ if (!N.hasParameterMapping())
+ buildParameterMapping(N);
- AtomicConstraint &Atomic = *N.getAtomicConstraint();
- TemplateArgumentListInfo SubstArgs;
- if (!Atomic.ParameterMapping) {
- llvm::SmallBitVector OccurringIndices(TemplateParams->size());
- S.MarkUsedTemplateParameters(Atomic.ConstraintExpr, /*OnlyDeduced=*/false,
- /*Depth=*/0, OccurringIndices);
- TemplateArgumentLoc *TempArgs =
- new (S.Context) TemplateArgumentLoc[OccurringIndices.count()];
- for (unsigned I = 0, J = 0, C = TemplateParams->size(); I != C; ++I)
- if (OccurringIndices[I])
- new (&(TempArgs)[J++])
- TemplateArgumentLoc(S.getIdentityTemplateArgumentLoc(
- TemplateParams->begin()[I],
- // Here we assume we do not support things like
- // template<typename A, typename B>
- // concept C = ...;
- //
- // template<typename... Ts> requires C<Ts...>
- // struct S { };
- // The above currently yields a diagnostic.
- // We still might have default arguments for concept parameters.
- ArgsAsWritten->NumTemplateArgs > I
- ? ArgsAsWritten->arguments()[I].getLocation()
- : SourceLocation()));
- Atomic.ParameterMapping.emplace(TempArgs, OccurringIndices.count());
- }
- SourceLocation InstLocBegin =
- ArgsAsWritten->arguments().empty()
- ? ArgsAsWritten->getLAngleLoc()
- : ArgsAsWritten->arguments().front().getSourceRange().getBegin();
- SourceLocation InstLocEnd =
- ArgsAsWritten->arguments().empty()
- ? ArgsAsWritten->getRAngleLoc()
- : ArgsAsWritten->arguments().front().getSourceRange().getEnd();
+ SourceLocation InstLocBegin, InstLocEnd;
+ llvm::ArrayRef Arguments = ArgsAsWritten->arguments();
+ if (Arguments.empty()) {
+ InstLocBegin = ArgsAsWritten->getLAngleLoc();
+ InstLocEnd = ArgsAsWritten->getRAngleLoc();
+ } else {
+ auto SR = Arguments[0].getSourceRange();
+ InstLocBegin = SR.getBegin();
+ InstLocEnd = SR.getEnd();
+ }
Sema::InstantiatingTemplate Inst(
- S, InstLocBegin,
+ SemaRef, InstLocBegin,
Sema::InstantiatingTemplate::ParameterMappingSubstitution{},
- const_cast<NamedDecl *>(Atomic.ConstraintDecl),
+ const_cast<NamedDecl *>(N.getConstraintDecl()),
{InstLocBegin, InstLocEnd});
if (Inst.isInvalid())
return true;
- if (S.SubstTemplateArguments(*Atomic.ParameterMapping, MLTAL, SubstArgs))
+
+ // TransformTemplateArguments is unable to preserve the source location of a
+ // pack. The SourceLocation is necessary for the instantiation location.
+ // FIXME: The BaseLoc will be used as the location of the pack expansion,
+ // which is wrong.
+ TemplateArgumentListInfo SubstArgs;
+ if (SemaRef.SubstTemplateArgumentsInParameterMapping(
+ N.getParameterMapping(), N.getBeginLoc(), *MLTAL, SubstArgs,
+ /*BuildPackExpansionTypes=*/!InFoldExpr))
+ return true;
+ Sema::CheckTemplateArgumentInfo CTAI;
+ auto *TD =
+ const_cast<TemplateDecl *>(cast<TemplateDecl>(N.getConstraintDecl()));
+ if (SemaRef.CheckTemplateArgumentList(TD, N.getUsedTemplateParamList(),
+ TD->getLocation(), SubstArgs,
+ /*DefaultArguments=*/{},
+ /*PartialTemplateArgs=*/false, CTAI))
return true;
TemplateArgumentLoc *TempArgs =
- new (S.Context) TemplateArgumentLoc[SubstArgs.size()];
- std::copy(SubstArgs.arguments().begin(), SubstArgs.arguments().end(),
- TempArgs);
- Atomic.ParameterMapping.emplace(TempArgs, SubstArgs.size());
+ new (SemaRef.Context) TemplateArgumentLoc[CTAI.SugaredConverted.size()];
+
+ for (unsigned I = 0; I < CTAI.SugaredConverted.size(); ++I) {
+ SourceLocation Loc;
+ // If this is an empty pack, we have no corresponding SubstArgs.
+ if (I < SubstArgs.size())
+ Loc = SubstArgs.arguments()[I].getLocation();
+
+ TempArgs[I] = SemaRef.getTrivialTemplateArgumentLoc(
+ CTAI.SugaredConverted[I], QualType(), Loc);
+ }
+
+ MutableArrayRef<TemplateArgumentLoc> Mapping(TempArgs,
+ CTAI.SugaredConverted.size());
+ N.updateParameterMapping(N.mappingOccurenceList(),
+ N.mappingOccurenceListForSubsumption(), Mapping,
+ N.getUsedTemplateParamList());
return false;
}
-static bool substituteParameterMappings(Sema &S, NormalizedConstraint &N,
- const ConceptSpecializationExpr *CSE) {
- MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
- CSE->getNamedConcept(), CSE->getNamedConcept()->getLexicalDeclContext(),
- /*Final=*/false, CSE->getTemplateArguments(),
- /*RelativeToPrimary=*/true,
- /*Pattern=*/nullptr,
- /*ForConstraintInstantiation=*/true);
+bool SubstituteParameterMappings::substitute(ConceptIdConstraint &CC) {
+ assert(CC.getConstraintDecl() && MLTAL && ArgsAsWritten);
- return substituteParameterMappings(S, N, CSE->getNamedConcept(), MLTAL,
- CSE->getTemplateArgsAsWritten());
-}
+ if (substitute(static_cast<NormalizedConstraintWithParamMapping &>(CC)))
+ return true;
-NormalizedConstraint::NormalizedConstraint(ASTContext &C,
- NormalizedConstraint LHS,
- NormalizedConstraint RHS,
- CompoundConstraintKind Kind)
- : Constraint{CompoundConstraint{
- new(C) NormalizedConstraintPair{std::move(LHS), std::move(RHS)},
- Kind}} {}
-
-NormalizedConstraint::NormalizedConstraint(ASTContext &C,
- const NormalizedConstraint &Other) {
- if (Other.isAtomic()) {
- Constraint = new (C) AtomicConstraint(*Other.getAtomicConstraint());
- } else if (Other.isFoldExpanded()) {
- Constraint = new (C) FoldExpandedConstraint(
- Other.getFoldExpandedConstraint()->Kind,
- NormalizedConstraint(C, Other.getFoldExpandedConstraint()->Constraint),
- Other.getFoldExpandedConstraint()->Pattern);
+ auto *CSE = CC.getConceptSpecializationExpr();
+ assert(CSE);
+ assert(!CC.getBeginLoc().isInvalid());
+
+ SourceLocation InstLocBegin, InstLocEnd;
+ if (llvm::ArrayRef Arguments = ArgsAsWritten->arguments();
+ Arguments.empty()) {
+ InstLocBegin = ArgsAsWritten->getLAngleLoc();
+ InstLocEnd = ArgsAsWritten->getRAngleLoc();
} else {
- Constraint = CompoundConstraint(
- new (C)
- NormalizedConstraintPair{NormalizedConstraint(C, Other.getLHS()),
- NormalizedConstraint(C, Other.getRHS())},
- Other.getCompoundKind());
+ auto SR = Arguments[0].getSourceRange();
+ InstLocBegin = SR.getBegin();
+ InstLocEnd = SR.getEnd();
}
-}
+ // This is useful for name lookup across modules; see Sema::getLookupModules.
+ Sema::InstantiatingTemplate Inst(
+ SemaRef, InstLocBegin,
+ Sema::InstantiatingTemplate::ParameterMappingSubstitution{},
+ const_cast<NamedDecl *>(CC.getConstraintDecl()),
+ {InstLocBegin, InstLocEnd});
+ if (Inst.isInvalid())
+ return true;
-NormalizedConstraint &NormalizedConstraint::getLHS() const {
- assert(isCompound() && "getLHS called on a non-compound constraint.");
- return cast<CompoundConstraint>(Constraint).getPointer()->LHS;
+ TemplateArgumentListInfo Out;
+ // TransformTemplateArguments is unable to preserve the source location of a
+ // pack. The SourceLocation is necessary for the instantiation location.
+ // FIXME: The BaseLoc will be used as the location of the pack expansion,
+ // which is wrong.
+ const ASTTemplateArgumentListInfo *ArgsAsWritten =
+ CSE->getTemplateArgsAsWritten();
+ if (SemaRef.SubstTemplateArgumentsInParameterMapping(
+ ArgsAsWritten->arguments(), CC.getBeginLoc(), *MLTAL, Out,
+ /*BuildPackExpansionTypes=*/!InFoldExpr))
+ return true;
+ Sema::CheckTemplateArgumentInfo CTAI;
+ if (SemaRef.CheckTemplateArgumentList(CSE->getNamedConcept(),
+ CSE->getConceptNameInfo().getLoc(), Out,
+ /*DefaultArgs=*/{},
+ /*PartialTemplateArgs=*/false, CTAI,
+ /*UpdateArgsWithConversions=*/false))
+ return true;
+ auto TemplateArgs = *MLTAL;
+ TemplateArgs.replaceOutermostTemplateArguments(
+ TemplateArgs.getAssociatedDecl(0).first, CTAI.SugaredConverted);
+ return SubstituteParameterMappings(SemaRef, &TemplateArgs, ArgsAsWritten,
+ InFoldExpr)
+ .substitute(CC.getNormalizedConstraint());
}
-NormalizedConstraint &NormalizedConstraint::getRHS() const {
- assert(isCompound() && "getRHS called on a non-compound constraint.");
- return cast<CompoundConstraint>(Constraint).getPointer()->RHS;
+bool SubstituteParameterMappings::substitute(NormalizedConstraint &N) {
+ switch (N.getKind()) {
+ case NormalizedConstraint::ConstraintKind::Atomic: {
+ if (!MLTAL) {
+ assert(!ArgsAsWritten);
+ return false;
+ }
+ return substitute(static_cast<NormalizedConstraintWithParamMapping &>(N));
+ }
+ case NormalizedConstraint::ConstraintKind::FoldExpanded: {
+ auto &FE = static_cast<FoldExpandedConstraint &>(N);
+ if (!MLTAL) {
+ llvm::SaveAndRestore _1(InFoldExpr, true);
+ assert(!ArgsAsWritten);
+ return substitute(FE.getNormalizedPattern());
+ }
+ Sema::ArgPackSubstIndexRAII _(SemaRef, std::nullopt);
+ substitute(static_cast<NormalizedConstraintWithParamMapping &>(FE));
+ return SubstituteParameterMappings(SemaRef, /*InFoldExpr=*/true)
+ .substitute(FE.getNormalizedPattern());
+ }
+ case NormalizedConstraint::ConstraintKind::ConceptId: {
+ auto &CC = static_cast<ConceptIdConstraint &>(N);
+ if (MLTAL) {
+ assert(ArgsAsWritten);
+ return substitute(CC);
+ }
+ assert(!ArgsAsWritten);
+ const ConceptSpecializationExpr *CSE = CC.getConceptSpecializationExpr();
+ ConceptDecl *Concept = CSE->getNamedConcept();
+ MultiLevelTemplateArgumentList MLTAL = SemaRef.getTemplateInstantiationArgs(
+ Concept, Concept->getLexicalDeclContext(),
+ /*Final=*/true, CSE->getTemplateArguments(),
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/true);
+
+ return SubstituteParameterMappings(
+ SemaRef, &MLTAL, CSE->getTemplateArgsAsWritten(), InFoldExpr)
+ .substitute(CC.getNormalizedConstraint());
+ }
+ case NormalizedConstraint::ConstraintKind::Compound: {
+ auto &Compound = static_cast<CompoundConstraint &>(N);
+ if (substitute(Compound.getLHS()))
+ return true;
+ return substitute(Compound.getRHS());
+ }
+ }
}
-std::optional<NormalizedConstraint>
-NormalizedConstraint::fromAssociatedConstraints(
+} // namespace
+
+NormalizedConstraint *NormalizedConstraint::fromAssociatedConstraints(
Sema &S, const NamedDecl *D, ArrayRef<AssociatedConstraint> ACs) {
assert(ACs.size() != 0);
- auto Conjunction = fromConstraintExpr(S, D, ACs[0].ConstraintExpr);
+ auto *Conjunction =
+ fromConstraintExpr(S, D, ACs[0].ConstraintExpr, ACs[0].ArgPackSubstIndex);
if (!Conjunction)
- return std::nullopt;
+ return nullptr;
for (unsigned I = 1; I < ACs.size(); ++I) {
- auto Next = fromConstraintExpr(S, D, ACs[I].ConstraintExpr);
+ auto *Next = fromConstraintExpr(S, D, ACs[I].ConstraintExpr,
+ ACs[I].ArgPackSubstIndex);
if (!Next)
- return std::nullopt;
- *Conjunction = NormalizedConstraint(S.Context, std::move(*Conjunction),
- std::move(*Next), CCK_Conjunction);
+ return nullptr;
+ Conjunction = CompoundConstraint::CreateConjunction(S.getASTContext(),
+ Conjunction, Next);
}
return Conjunction;
}
-std::optional<NormalizedConstraint>
-NormalizedConstraint::fromConstraintExpr(Sema &S, const NamedDecl *D,
- const Expr *E) {
+NormalizedConstraint *NormalizedConstraint::fromConstraintExpr(
+ Sema &S, const NamedDecl *D, const Expr *E, UnsignedOrNone SubstIndex) {
assert(E != nullptr);
// C++ [temp.constr.normal]p1.1
@@ -1597,23 +2170,29 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, const NamedDecl *D,
// [...]
E = E->IgnoreParenImpCasts();
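+ // Guard against recursive concepts: if this constraint is already being
+ // evaluated on the satisfaction stack, diagnose the cycle and bail out.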
+ llvm::FoldingSetNodeID ID;
+ if (D && DiagRecursiveConstraintEval(S, ID, D, E)) {
+ return nullptr;
+ }
+ SatisfactionStackRAII StackRAII(S, D, ID);
+
// C++2a [temp.param]p4:
// [...] If T is not a pack, then E is E', otherwise E is (E' && ...).
// A fold expression is considered an atomic constraint per the current wording.
// See http://cplusplus.github.io/concepts-ts/ts-active.html#28
if (LogicalBinOp BO = E) {
- auto LHS = fromConstraintExpr(S, D, BO.getLHS());
+ auto *LHS = fromConstraintExpr(S, D, BO.getLHS(), SubstIndex);
if (!LHS)
- return std::nullopt;
- auto RHS = fromConstraintExpr(S, D, BO.getRHS());
+ return nullptr;
+ auto *RHS = fromConstraintExpr(S, D, BO.getRHS(), SubstIndex);
if (!RHS)
- return std::nullopt;
+ return nullptr;
- return NormalizedConstraint(S.Context, std::move(*LHS), std::move(*RHS),
- BO.isAnd() ? CCK_Conjunction : CCK_Disjunction);
+ return CompoundConstraint::Create(
+ S.Context, LHS, BO.isAnd() ? CCK_Conjunction : CCK_Disjunction, RHS);
} else if (auto *CSE = dyn_cast<const ConceptSpecializationExpr>(E)) {
- const NormalizedConstraint *SubNF;
+ NormalizedConstraint *SubNF;
{
Sema::InstantiatingTemplate Inst(
S, CSE->getExprLoc(),
@@ -1621,7 +2200,7 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, const NamedDecl *D,
// FIXME: improve const-correctness of InstantiatingTemplate
const_cast<NamedDecl *>(D), CSE->getSourceRange());
if (Inst.isInvalid())
- return std::nullopt;
+ return nullptr;
// C++ [temp.constr.normal]p1.1
// [...]
// The normal form of an id-expression of the form C<A1, A2, ..., AN>,
@@ -1631,20 +2210,21 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, const NamedDecl *D,
// constraint. If any such substitution results in an invalid type or
// expression, the program is ill-formed; no diagnostic is required.
// [...]
- ConceptDecl *CD = CSE->getNamedConcept();
- SubNF = S.getNormalizedAssociatedConstraints(
- CD, AssociatedConstraint(CD->getConstraintExpr()));
+
+ // Use canonical declarations to merge ConceptDecls across
+ // different modules.
+ ConceptDecl *CD = CSE->getNamedConcept()->getCanonicalDecl();
+ SubNF = NormalizedConstraint::fromAssociatedConstraints(
+ S, CD, AssociatedConstraint(CD->getConstraintExpr(), SubstIndex));
+
if (!SubNF)
- return std::nullopt;
+ return nullptr;
}
- std::optional<NormalizedConstraint> New;
- New.emplace(S.Context, *SubNF);
-
- if (substituteParameterMappings(S, *New, CSE))
- return std::nullopt;
+ return ConceptIdConstraint::Create(S.getASTContext(),
+ CSE->getConceptReference(), SubNF, D,
+ CSE, SubstIndex);
- return New;
} else if (auto *FE = dyn_cast<const CXXFoldExpr>(E);
FE && S.getLangOpts().CPlusPlus26 &&
(FE->getOperator() == BinaryOperatorKind::BO_LAnd ||
@@ -1658,31 +2238,61 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, const NamedDecl *D,
: FoldExpandedConstraint::FoldOperatorKind::Or;
if (FE->getInit()) {
- auto LHS = fromConstraintExpr(S, D, FE->getLHS());
- auto RHS = fromConstraintExpr(S, D, FE->getRHS());
+ auto *LHS = fromConstraintExpr(S, D, FE->getLHS(), SubstIndex);
+ auto *RHS = fromConstraintExpr(S, D, FE->getRHS(), SubstIndex);
if (!LHS || !RHS)
- return std::nullopt;
+ return nullptr;
if (FE->isRightFold())
- RHS = NormalizedConstraint{new (S.Context) FoldExpandedConstraint{
- Kind, std::move(*RHS), FE->getPattern()}};
+ LHS = FoldExpandedConstraint::Create(S.getASTContext(),
+ FE->getPattern(), D, Kind, LHS);
else
- LHS = NormalizedConstraint{new (S.Context) FoldExpandedConstraint{
- Kind, std::move(*LHS), FE->getPattern()}};
-
- return NormalizedConstraint(
- S.Context, std::move(*LHS), std::move(*RHS),
- FE->getOperator() == BinaryOperatorKind::BO_LAnd ? CCK_Conjunction
- : CCK_Disjunction);
+ RHS = FoldExpandedConstraint::Create(S.getASTContext(),
+ FE->getPattern(), D, Kind, RHS);
+
+ return CompoundConstraint::Create(
+ S.getASTContext(), LHS,
+ (FE->getOperator() == BinaryOperatorKind::BO_LAnd ? CCK_Conjunction
+ : CCK_Disjunction),
+ RHS);
}
- auto Sub = fromConstraintExpr(S, D, FE->getPattern());
+ auto *Sub = fromConstraintExpr(S, D, FE->getPattern(), SubstIndex);
if (!Sub)
- return std::nullopt;
- return NormalizedConstraint{new (S.Context) FoldExpandedConstraint{
- Kind, std::move(*Sub), FE->getPattern()}};
+ return nullptr;
+ return FoldExpandedConstraint::Create(S.getASTContext(), FE->getPattern(),
+ D, Kind, Sub);
}
+ return AtomicConstraint::Create(S.getASTContext(), E, D, SubstIndex);
+}
- return NormalizedConstraint{new (S.Context) AtomicConstraint(E, D)};
+const NormalizedConstraint *Sema::getNormalizedAssociatedConstraints(
+ ConstrainedDeclOrNestedRequirement ConstrainedDeclOrNestedReq,
+ ArrayRef<AssociatedConstraint> AssociatedConstraints) {
+ if (!ConstrainedDeclOrNestedReq) {
+ auto *Normalized = NormalizedConstraint::fromAssociatedConstraints(
+ *this, nullptr, AssociatedConstraints);
+ if (!Normalized ||
+ SubstituteParameterMappings(*this).substitute(*Normalized))
+ return nullptr;
+
+ return Normalized;
+ }
+
+ // FIXME: ConstrainedDeclOrNestedReq is never a NestedRequirement!
+ const NamedDecl *ND =
+ ConstrainedDeclOrNestedReq.dyn_cast<const NamedDecl *>();
+ auto CacheEntry = NormalizationCache.find(ConstrainedDeclOrNestedReq);
+ if (CacheEntry == NormalizationCache.end()) {
+ auto *Normalized = NormalizedConstraint::fromAssociatedConstraints(
+ *this, ND, AssociatedConstraints);
+ CacheEntry =
+ NormalizationCache.try_emplace(ConstrainedDeclOrNestedReq, Normalized)
+ .first;
+ if (!Normalized ||
+ SubstituteParameterMappings(*this).substitute(*Normalized))
+ return nullptr;
+ }
+ return CacheEntry->second;
}
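
A minimal sketch of the source-level shape this normalization handles; the concept names are illustrative only, not taken from the patch's tests:

#include <type_traits>

template <class T> concept A = std::is_integral_v<T>;
template <class T> concept B = std::is_signed_v<T>;
template <class T> concept C = sizeof(T) >= 4;

// A<T> && (B<T> || C<T>) normalizes to a CompoundConstraint conjunction
// whose right-hand side is a disjunction; each concept-id becomes a
// ConceptIdConstraint wrapping the normal form of the named concept's
// constraint expression, now cached per canonical ConceptDecl.
template <class T>
  requires A<T> && (B<T> || C<T>)
void f(T);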
bool FoldExpandedConstraint::AreCompatibleForSubsumption(
@@ -1693,8 +2303,10 @@ bool FoldExpandedConstraint::AreCompatibleForSubsumption(
// if their respective constraints both contain an equivalent unexpanded pack.
llvm::SmallVector<UnexpandedParameterPack> APacks, BPacks;
- Sema::collectUnexpandedParameterPacks(const_cast<Expr *>(A.Pattern), APacks);
- Sema::collectUnexpandedParameterPacks(const_cast<Expr *>(B.Pattern), BPacks);
+ Sema::collectUnexpandedParameterPacks(const_cast<Expr *>(A.getPattern()),
+ APacks);
+ Sema::collectUnexpandedParameterPacks(const_cast<Expr *>(B.getPattern()),
+ BPacks);
for (const UnexpandedParameterPack &APack : APacks) {
auto ADI = getDepthAndIndex(APack);
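
A hedged illustration of the compatibility rule: two fold-expanded constraints are compared for subsumption only when both fold over an equivalent unexpanded pack (C++26, P2963). Compile with -std=c++2c; the names are illustrative.

#include <type_traits>

template <class... Ts>
  requires (std::is_integral_v<Ts> && ...)
void g(Ts...);                        // #1

template <class... Ts>
  requires (std::is_integral_v<Ts> && ...) && (std::is_signed_v<Ts> && ...)
void g(Ts...);                        // #2: same pack, same fold operator,
                                      // strictly more constrained

int main() { g(1, 2); }               // unambiguous: #2 subsumes #1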
@@ -1788,7 +2400,7 @@ bool Sema::MaybeEmitAmbiguousAtomicConstraintsDiagnostic(
const AtomicConstraint &B) {
if (!A.hasMatchingParameterMapping(Context, B))
return false;
- const Expr *EA = A.ConstraintExpr, *EB = B.ConstraintExpr;
+ const Expr *EA = A.getConstraintExpr(), *EB = B.getConstraintExpr();
if (EA == EB)
return true;
@@ -1841,24 +2453,6 @@ bool Sema::MaybeEmitAmbiguousAtomicConstraintsDiagnostic(
return true;
}
-NormalizedConstraint::CompoundConstraintKind
-NormalizedConstraint::getCompoundKind() const {
- assert(isCompound() && "getCompoundKind on a non-compound constraint..");
- return cast<CompoundConstraint>(Constraint).getInt();
-}
-
-AtomicConstraint *NormalizedConstraint::getAtomicConstraint() const {
- assert(isAtomic() && "getAtomicConstraint called on non-atomic constraint.");
- return cast<AtomicConstraint *>(Constraint);
-}
-
-FoldExpandedConstraint *
-NormalizedConstraint::getFoldExpandedConstraint() const {
- assert(isFoldExpanded() &&
- "getFoldExpandedConstraint called on non-fold-expanded constraint.");
- return cast<FoldExpandedConstraint *>(Constraint);
-}
-
//
//
// ------------------------ Subsumption -----------------------------------
@@ -1874,8 +2468,8 @@ uint16_t SubsumptionChecker::getNewLiteralId() {
return NextID++;
}
-auto SubsumptionChecker::find(AtomicConstraint *Ori) -> Literal {
- auto &Elems = AtomicMap[Ori->ConstraintExpr];
+auto SubsumptionChecker::find(const AtomicConstraint *Ori) -> Literal {
+ auto &Elems = AtomicMap[Ori->getConstraintExpr()];
// C++ [temp.constr.order] p2
// - an atomic constraint A subsumes another atomic constraint B
// if and only if the A and B are identical [...]
@@ -1891,13 +2485,16 @@ auto SubsumptionChecker::find(AtomicConstraint *Ori) -> Literal {
// subsumes another, their literal will be the same
llvm::FoldingSetNodeID ID;
- const auto &Mapping = Ori->ParameterMapping;
- ID.AddBoolean(Mapping.has_value());
- if (Mapping) {
- for (const TemplateArgumentLoc &TAL : *Mapping) {
- SemaRef.getASTContext()
- .getCanonicalTemplateArgument(TAL.getArgument())
- .Profile(ID, SemaRef.getASTContext());
+ ID.AddBoolean(Ori->hasParameterMapping());
+ if (Ori->hasParameterMapping()) {
+ const auto &Mapping = Ori->getParameterMapping();
+ const NormalizedConstraint::OccurenceList &Indexes =
+ Ori->mappingOccurenceListForSubsumption();
+ for (auto [Idx, TAL] : llvm::enumerate(Mapping)) {
+ if (Indexes[Idx])
+ SemaRef.getASTContext()
+ .getCanonicalTemplateArgument(TAL.getArgument())
+ .Profile(ID, SemaRef.getASTContext());
}
}
auto It = Elems.find(ID);
@@ -1912,11 +2509,11 @@ auto SubsumptionChecker::find(AtomicConstraint *Ori) -> Literal {
return It->getSecond().ID;
}
-auto SubsumptionChecker::find(FoldExpandedConstraint *Ori) -> Literal {
- auto &Elems = FoldMap[Ori->Pattern];
+auto SubsumptionChecker::find(const FoldExpandedConstraint *Ori) -> Literal {
+ auto &Elems = FoldMap[Ori->getPattern()];
FoldExpendedConstraintKey K;
- K.Kind = Ori->Kind;
+ K.Kind = Ori->getFoldOperator();
auto It = llvm::find_if(Elems, [&K](const FoldExpendedConstraintKey &Other) {
return K.Kind == Other.Kind;
@@ -1960,38 +2557,47 @@ FormulaType SubsumptionChecker::Normalize(const NormalizedConstraint &NC) {
AddUniqueClauseToFormula(Res, std::move(C));
};
- if (NC.isAtomic())
- return {{find(NC.getAtomicConstraint())}};
+ switch (NC.getKind()) {
- if (NC.isFoldExpanded())
- return {{find(NC.getFoldExpandedConstraint())}};
+ case NormalizedConstraint::ConstraintKind::Atomic:
+ return {{find(&static_cast<const AtomicConstraint &>(NC))}};
- FormulaType Left, Right;
- SemaRef.runWithSufficientStackSpace(SourceLocation(), [&] {
- Left = Normalize<FormulaType>(NC.getLHS());
- Right = Normalize<FormulaType>(NC.getRHS());
- });
+ case NormalizedConstraint::ConstraintKind::FoldExpanded:
+ return {{find(&static_cast<const FoldExpandedConstraint &>(NC))}};
- if (NC.getCompoundKind() == FormulaType::Kind) {
- auto SizeLeft = Left.size();
- Res = std::move(Left);
- Res.reserve(SizeLeft + Right.size());
- std::for_each(std::make_move_iterator(Right.begin()),
- std::make_move_iterator(Right.end()), Add);
- return Res;
- }
+ case NormalizedConstraint::ConstraintKind::ConceptId:
+ return Normalize<FormulaType>(
+ static_cast<const ConceptIdConstraint &>(NC).getNormalizedConstraint());
+
+ case NormalizedConstraint::ConstraintKind::Compound: {
+ const auto &Compound = static_cast<const CompoundConstraint &>(NC);
+ FormulaType Left, Right;
+ SemaRef.runWithSufficientStackSpace(SourceLocation(), [&] {
+ Left = Normalize<FormulaType>(Compound.getLHS());
+ Right = Normalize<FormulaType>(Compound.getRHS());
+ });
+
+ if (Compound.getCompoundKind() == FormulaType::Kind) {
+ auto SizeLeft = Left.size();
+ Res = std::move(Left);
+ Res.reserve(SizeLeft + Right.size());
+ std::for_each(std::make_move_iterator(Right.begin()),
+ std::make_move_iterator(Right.end()), Add);
+ return Res;
+ }
- Res.reserve(Left.size() * Right.size());
- for (const auto &LTransform : Left) {
- for (const auto &RTransform : Right) {
- Clause Combined;
- Combined.reserve(LTransform.size() + RTransform.size());
- llvm::append_range(Combined, LTransform);
- llvm::append_range(Combined, RTransform);
- Add(std::move(Combined));
+ Res.reserve(Left.size() * Right.size());
+ for (const auto &LTransform : Left) {
+ for (const auto &RTransform : Right) {
+ Clause Combined;
+ Combined.reserve(LTransform.size() + RTransform.size());
+ llvm::copy(LTransform, std::back_inserter(Combined));
+ llvm::copy(RTransform, std::back_inserter(Combined));
+ Add(std::move(Combined));
+ }
}
+ return Res;
+ }
}
- return Res;
}
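
A standalone sketch of the clause cross-product in the Compound case above, used when the compound kind differs from the target normal form (e.g. a disjunction while building CNF); the types are simplified stand-ins for the checker's Literal/Clause/Formula:

#include <vector>

using Literal = int;
using Clause = std::vector<Literal>;
using Formula = std::vector<Clause>;

// Distribute Left over Right: every clause of the result is the
// concatenation of one clause from each side.
Formula crossProduct(const Formula &Left, const Formula &Right) {
  Formula Res;
  Res.reserve(Left.size() * Right.size());
  for (const Clause &L : Left)
    for (const Clause &R : Right) {
      Clause Combined;
      Combined.reserve(L.size() + R.size());
      Combined.insert(Combined.end(), L.begin(), L.end());
      Combined.insert(Combined.end(), R.begin(), R.end());
      Res.push_back(std::move(Combined));
    }
  return Res;
}

// E.g. normalizing A or (B and C) to CNF: {{A}} x {{B},{C}} gives the two
// clauses (A or B) and (A or C).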
void SubsumptionChecker::AddUniqueClauseToFormula(Formula &F, Clause C) {
@@ -2006,12 +2612,12 @@ std::optional<bool> SubsumptionChecker::Subsumes(
const NamedDecl *DP, ArrayRef<AssociatedConstraint> P, const NamedDecl *DQ,
ArrayRef<AssociatedConstraint> Q) {
const NormalizedConstraint *PNormalized =
- getNormalizedAssociatedConstraints(SemaRef, DP, P);
+ SemaRef.getNormalizedAssociatedConstraints(DP, P);
if (!PNormalized)
return std::nullopt;
const NormalizedConstraint *QNormalized =
- getNormalizedAssociatedConstraints(SemaRef, DQ, Q);
+ SemaRef.getNormalizedAssociatedConstraints(DQ, Q);
if (!QNormalized)
return std::nullopt;
@@ -2061,9 +2667,9 @@ bool SubsumptionChecker::Subsumes(const FoldExpandedConstraint *A,
// constraint B if they are compatible for subsumption, have the same
// fold-operator, and the constraint of A subsumes that of B.
bool DoesSubsume =
- A->Kind == B->Kind &&
+ A->getFoldOperator() == B->getFoldOperator() &&
FoldExpandedConstraint::AreCompatibleForSubsumption(*A, *B) &&
- Subsumes(&A->Constraint, &B->Constraint);
+ Subsumes(&A->getNormalizedPattern(), &B->getNormalizedPattern());
It = FoldSubsumptionCache.try_emplace(std::move(Key), DoesSubsume).first;
}
return It->second;
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index 1131e1f..d27f767 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -13660,7 +13660,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
if (Cxx20Enumerator) {
Diag(NameLoc, diag::warn_cxx17_compat_using_decl_non_member_enumerator)
- << SS.getRange();
+ << SS.getScopeRep() << SS.getRange();
return false;
}
@@ -17876,13 +17876,15 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
findFailedBooleanCondition(Converted.get());
if (const auto *ConceptIDExpr =
dyn_cast_or_null<ConceptSpecializationExpr>(InnerCond)) {
- // Drill down into concept specialization expressions to see why they
- // weren't satisfied.
- Diag(AssertExpr->getBeginLoc(), diag::err_static_assert_failed)
- << !HasMessage << Msg.str() << AssertExpr->getSourceRange();
- ConstraintSatisfaction Satisfaction;
- if (!CheckConstraintSatisfaction(ConceptIDExpr, Satisfaction))
- DiagnoseUnsatisfiedConstraint(Satisfaction);
+ const ASTConstraintSatisfaction &Satisfaction =
+ ConceptIDExpr->getSatisfaction();
+ if (!Satisfaction.ContainsErrors || Satisfaction.NumRecords) {
+ Diag(AssertExpr->getBeginLoc(), diag::err_static_assert_failed)
+ << !HasMessage << Msg.str() << AssertExpr->getSourceRange();
+ // Drill down into concept specialization expressions to see why they
+ // weren't satisfied.
+ DiagnoseUnsatisfiedConstraint(ConceptIDExpr);
+ }
} else if (InnerCond && !isa<CXXBoolLiteralExpr>(InnerCond) &&
!isa<IntegerLiteral>(InnerCond)) {
Diag(InnerCond->getBeginLoc(),
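
Illustrative input for the new drill-down path (the diagnostic wording below is approximate, not copied from the patch's tests):

template <class T> concept Small = sizeof(T) <= 2;

static_assert(Small<double>);
// error: static assertion failed
// note: because 'sizeof(double) <= 2' (e.g. 8 <= 2) evaluated to false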
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 06b2529..4d3c7d6 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -20107,9 +20107,10 @@ static void DoMarkVarDeclReferenced(
bool NeededForConstantEvaluation =
isPotentiallyConstantEvaluatedContext(SemaRef) && UsableInConstantExpr;
- bool NeedDefinition = OdrUse == OdrUseContext::Used ||
- NeededForConstantEvaluation ||
- Var->getType()->isUndeducedType();
+ bool NeedDefinition =
+ OdrUse == OdrUseContext::Used || NeededForConstantEvaluation ||
+ (TSK != clang::TSK_Undeclared && !UsableInConstantExpr &&
+ Var->getType()->isUndeducedType());
assert(!isa<VarTemplatePartialSpecializationDecl>(Var) &&
"Can't instantiate a partial template specialization.");
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index 779ccf5..0fe242dce 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -1251,6 +1251,10 @@ Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
else
Record = cast<CXXRecordDecl>(ContextDecl);
+ // 'this' never refers to the lambda class itself.
+ if (Record->isLambda())
+ return;
+
QualType T = S.Context.getCanonicalTagType(Record);
T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
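
Illustration of the guard: inside a lambda body, a captured 'this' refers to the enclosing class, never to the closure type, so the RAII scope must not install the lambda class as the current 'this' type.

struct S {
  auto f() {
    return [this] { return this; };   // decltype(this) is S*, not the closure type
  }
};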
@@ -7931,21 +7935,27 @@ Sema::BuildExprRequirement(
// be satisfied.
TemplateParameterList *TPL =
ReturnTypeRequirement.getTypeConstraintTemplateParameterList();
- QualType MatchedType =
- Context.getReferenceQualifiedType(E).getCanonicalType();
+ QualType MatchedType = Context.getReferenceQualifiedType(E);
llvm::SmallVector<TemplateArgument, 1> Args;
Args.push_back(TemplateArgument(MatchedType));
auto *Param = cast<TemplateTypeParmDecl>(TPL->getParam(0));
- MultiLevelTemplateArgumentList MLTAL(Param, Args, /*Final=*/false);
+ MultiLevelTemplateArgumentList MLTAL(Param, Args, /*Final=*/true);
MLTAL.addOuterRetainedLevels(TPL->getDepth());
const TypeConstraint *TC = Param->getTypeConstraint();
assert(TC && "Type Constraint cannot be null here");
auto *IDC = TC->getImmediatelyDeclaredConstraint();
assert(IDC && "ImmediatelyDeclaredConstraint can't be null here.");
ExprResult Constraint = SubstExpr(IDC, MLTAL);
- if (Constraint.isInvalid()) {
+ bool HasError = Constraint.isInvalid();
+ if (!HasError) {
+ SubstitutedConstraintExpr =
+ cast<ConceptSpecializationExpr>(Constraint.get());
+ if (SubstitutedConstraintExpr->getSatisfaction().ContainsErrors)
+ HasError = true;
+ }
+ if (HasError) {
return new (Context) concepts::ExprRequirement(
createSubstDiagAt(IDC->getExprLoc(),
[&](llvm::raw_ostream &OS) {
@@ -7954,8 +7964,6 @@ Sema::BuildExprRequirement(
}),
IsSimple, NoexceptLoc, ReturnTypeRequirement);
}
- SubstitutedConstraintExpr =
- cast<ConceptSpecializationExpr>(Constraint.get());
if (!SubstitutedConstraintExpr->isSatisfied())
Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
}
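
A sketch of the requirement kind this affects; with the new code, errors inside the substituted ConceptSpecializationExpr surface as a substitution diagnostic instead of being dropped:

#include <concepts>

template <class T>
concept AddsToInt = requires(T t) {
  { t + t } -> std::convertible_to<int>;   // return-type-requirement
};

static_assert(AddsToInt<short>);
static_assert(!AddsToInt<void *>);         // 't + t' is ill-formed here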
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 129b03c..fa30c66b 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -1810,6 +1810,13 @@ bool clang::CreateHLSLAttributedResourceType(
}
ResAttrs.RawBuffer = true;
break;
+ case attr::HLSLIsCounter:
+ if (ResAttrs.IsCounter) {
+ S.Diag(A->getLocation(), diag::warn_duplicate_attribute_exact) << A;
+ return false;
+ }
+ ResAttrs.IsCounter = true;
+ break;
case attr::HLSLContainedType: {
const HLSLContainedTypeAttr *CTAttr = cast<HLSLContainedTypeAttr>(A);
QualType Ty = CTAttr->getType();
@@ -1902,6 +1909,10 @@ bool SemaHLSL::handleResourceTypeAttr(QualType T, const ParsedAttr &AL) {
A = HLSLRawBufferAttr::Create(getASTContext(), ACI);
break;
+ case ParsedAttr::AT_HLSLIsCounter:
+ A = HLSLIsCounterAttr::Create(getASTContext(), ACI);
+ break;
+
case ParsedAttr::AT_HLSLContainedType: {
if (AL.getNumArgs() != 1 && !AL.hasParsedType()) {
Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index c971293..0d0d2c0 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -8219,8 +8219,8 @@ ExprResult InitializationSequence::Perform(Sema &S,
// InitializeTemporary entity for our target type.
QualType Ty = Step->Type;
bool IsTemporary = !S.Context.hasSameType(Entity.getType(), Ty);
- InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(Ty);
- InitializedEntity InitEntity = IsTemporary ? TempEntity : Entity;
+ InitializedEntity InitEntity =
+ IsTemporary ? InitializedEntity::InitializeTemporary(Ty) : Entity;
InitListChecker PerformInitList(S, InitEntity,
InitList, Ty, /*VerifyOnly=*/false,
/*TreatUnavailableAsInvalid=*/false);
@@ -8242,7 +8242,6 @@ ExprResult InitializationSequence::Perform(Sema &S,
InitListExpr *StructuredInitList =
PerformInitList.getFullyStructuredList();
- CurInit.get();
CurInit = shouldBindAsTemporary(InitEntity)
? S.MaybeBindToTemporary(StructuredInitList)
: StructuredInitList;
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index a64f207..9aaf7f4 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -2789,7 +2789,7 @@ OpenACCPrivateRecipe SemaOpenACC::CreatePrivateInitRecipe(const Expr *VarExpr) {
AllocaDecl->setInitStyle(VarDecl::CallInit);
}
- return OpenACCPrivateRecipe(AllocaDecl, Init.get());
+ return OpenACCPrivateRecipe(AllocaDecl);
}
OpenACCFirstPrivateRecipe
@@ -2828,7 +2828,14 @@ SemaOpenACC::CreateFirstPrivateInitRecipe(const Expr *VarExpr) {
if (!ArrTy) {
ExprResult Init = FinishValueInit(
SemaRef.SemaRef, Entity, VarExpr->getBeginLoc(), VarTy, TemporaryDRE);
- return OpenACCFirstPrivateRecipe(AllocaDecl, Init.get(), Temporary);
+
+ // For the 'no bounds' version we can use this as a shortcut, so set the
+ // init anyway.
+ if (Init.isUsable()) {
+ AllocaDecl->setInit(Init.get());
+ AllocaDecl->setInitStyle(VarDecl::CallInit);
+ }
+ return OpenACCFirstPrivateRecipe(AllocaDecl, Temporary);
}
// Arrays need to have each individual element initialized as there
@@ -2875,8 +2882,16 @@ SemaOpenACC::CreateFirstPrivateInitRecipe(const Expr *VarExpr) {
ExprResult Init = FinishValueInit(SemaRef.SemaRef, Entity,
VarExpr->getBeginLoc(), VarTy, InitExpr);
- return OpenACCFirstPrivateRecipe(AllocaDecl, Init.get(), Temporary);
+ // For the 'no bounds' version we can use this as a shortcut, so set the
+ // init anyway.
+ if (Init.isUsable()) {
+ AllocaDecl->setInit(Init.get());
+ AllocaDecl->setInitStyle(VarDecl::CallInit);
+ }
+
+ return OpenACCFirstPrivateRecipe(AllocaDecl, Temporary);
}
+
OpenACCReductionRecipe SemaOpenACC::CreateReductionInitRecipe(
OpenACCReductionOperator ReductionOperator, const Expr *VarExpr) {
// TODO: OpenACC: This shouldn't be necessary, see PrivateInitRecipe
@@ -2932,5 +2947,12 @@ OpenACCReductionRecipe SemaOpenACC::CreateReductionInitRecipe(
ExprResult Init = FinishValueInit(SemaRef.SemaRef, Entity,
VarExpr->getBeginLoc(), VarTy, InitExpr);
- return OpenACCReductionRecipe(AllocaDecl, Init.get());
+
+ // For the 'no bounds' version we can use this as a shortcut, so set the
+ // init anyway.
+ if (Init.isUsable()) {
+ AllocaDecl->setInit(Init.get());
+ AllocaDecl->setInitStyle(VarDecl::CallInit);
+ }
+ return OpenACCReductionRecipe(AllocaDecl);
}
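
Illustrative OpenACC input for the scalar ('no bounds') case, where the initializer now lives on the alloca VarDecl rather than in a separate recipe field; the pragma below is a plausible example, not taken from the patch's tests.

void f() {
  int x = 1, sum = 0;
#pragma acc parallel loop firstprivate(x) reduction(+ : sum)
  for (int i = 0; i < 8; ++i)
    sum += x + i;
}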
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index ea5c4265..b870114 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -804,7 +804,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
case TemplateDeductionResult::ConstraintsNotSatisfied: {
CNSInfo *Saved = new (Context) CNSInfo;
Saved->TemplateArgs = Info.takeSugared();
- Saved->Satisfaction = Info.AssociatedConstraintsSatisfaction;
+ Saved->Satisfaction = std::move(Info.AssociatedConstraintsSatisfaction);
Result.Data = Saved;
break;
}
@@ -852,6 +852,7 @@ void DeductionFailureInfo::Destroy() {
case TemplateDeductionResult::ConstraintsNotSatisfied:
// FIXME: Destroy the template argument list?
+ static_cast<CNSInfo *>(Data)->Satisfaction.~ConstraintSatisfaction();
Data = nullptr;
if (PartialDiagnosticAt *Diag = getSFINAEDiagnostic()) {
Diag->~PartialDiagnosticAt();
@@ -12739,7 +12740,8 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
<< (unsigned)FnKindPair.first << (unsigned)ocs_non_template
<< FnDesc /* Ignored */;
ConstraintSatisfaction Satisfaction;
- if (S.CheckFunctionConstraints(Fn, Satisfaction))
+ if (S.CheckFunctionConstraints(Fn, Satisfaction, SourceLocation(),
+ /*ForOverloadResolution=*/true))
break;
S.DiagnoseUnsatisfiedConstraint(Satisfaction);
}
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 2bf1511..dcf2876 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -9,6 +9,7 @@
//===----------------------------------------------------------------------===//
#include "TreeTransform.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -1222,8 +1223,9 @@ static ExprResult formImmediatelyDeclaredConstraint(
if (auto *CD = dyn_cast<ConceptDecl>(NamedConcept)) {
ImmediatelyDeclaredConstraint = S.CheckConceptTemplateId(
SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo,
- /*FoundDecl=*/FoundDecl ? FoundDecl : NamedConcept, CD,
- &ConstraintArgs);
+ /*FoundDecl=*/FoundDecl ? FoundDecl : CD, CD, &ConstraintArgs,
+ /*DoCheckConstraintSatisfaction=*/
+ !S.inParameterMappingSubstitution());
}
// We have a template template parameter
else {
@@ -4850,13 +4852,11 @@ void Sema::diagnoseMissingTemplateArguments(const CXXScopeSpec &SS,
diagnoseMissingTemplateArguments(Name, Loc);
}
-ExprResult
-Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &ConceptNameInfo,
- NamedDecl *FoundDecl,
- ConceptDecl *NamedConcept,
- const TemplateArgumentListInfo *TemplateArgs) {
+ExprResult Sema::CheckConceptTemplateId(
+ const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl,
+ TemplateDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs,
+ bool DoCheckConstraintSatisfaction) {
assert(NamedConcept && "A concept template id without a template?");
if (NamedConcept->isInvalidDecl())
@@ -4873,33 +4873,48 @@ Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
DiagnoseUseOfDecl(NamedConcept, ConceptNameInfo.getLoc());
+ // There's a bug with CTAI.CanonicalConverted.
+ // If the template argument contains a DependentDecltypeType that includes a
+ // TypeAliasType, and the same written type had occurred previously in the
+ // source, then the DependentDecltypeType would be canonicalized to that
+ // previous type which would mess up the substitution.
+ // FIXME: Reland https://github.com/llvm/llvm-project/pull/101782 properly!
auto *CSD = ImplicitConceptSpecializationDecl::Create(
Context, NamedConcept->getDeclContext(), NamedConcept->getLocation(),
- CTAI.CanonicalConverted);
+ CTAI.SugaredConverted);
ConstraintSatisfaction Satisfaction;
bool AreArgsDependent =
TemplateSpecializationType::anyDependentTemplateArguments(
- *TemplateArgs, CTAI.CanonicalConverted);
- MultiLevelTemplateArgumentList MLTAL(NamedConcept, CTAI.CanonicalConverted,
+ *TemplateArgs, CTAI.SugaredConverted);
+ MultiLevelTemplateArgumentList MLTAL(NamedConcept, CTAI.SugaredConverted,
/*Final=*/false);
- LocalInstantiationScope Scope(*this);
-
- EnterExpressionEvaluationContext EECtx{
- *this, ExpressionEvaluationContext::Unevaluated, CSD};
-
- if (!AreArgsDependent &&
- CheckConstraintSatisfaction(
- NamedConcept, AssociatedConstraint(NamedConcept->getConstraintExpr()),
- MLTAL,
- SourceRange(SS.isSet() ? SS.getBeginLoc() : ConceptNameInfo.getLoc(),
- TemplateArgs->getRAngleLoc()),
- Satisfaction))
- return ExprError();
auto *CL = ConceptReference::Create(
Context,
SS.isSet() ? SS.getWithLocInContext(Context) : NestedNameSpecifierLoc{},
TemplateKWLoc, ConceptNameInfo, FoundDecl, NamedConcept,
ASTTemplateArgumentListInfo::Create(Context, *TemplateArgs));
+
+ bool Error = false;
+ if (const auto *Concept = dyn_cast<ConceptDecl>(NamedConcept);
+ Concept && Concept->getConstraintExpr() && !AreArgsDependent &&
+ DoCheckConstraintSatisfaction) {
+
+ LocalInstantiationScope Scope(*this);
+
+ EnterExpressionEvaluationContext EECtx{
+ *this, ExpressionEvaluationContext::Unevaluated, CSD};
+
+ Error = CheckConstraintSatisfaction(
+ NamedConcept, AssociatedConstraint(Concept->getConstraintExpr()), MLTAL,
+ SourceRange(SS.isSet() ? SS.getBeginLoc() : ConceptNameInfo.getLoc(),
+ TemplateArgs->getRAngleLoc()),
+ Satisfaction, CL);
+ Satisfaction.ContainsErrors = Error;
+ }
+
+ if (Error)
+ return ExprError();
+
return ConceptSpecializationExpr::Create(
Context, CL, CSD, AreArgsDependent ? nullptr : &Satisfaction);
}
@@ -5217,10 +5232,11 @@ bool Sema::CheckTemplateTypeArgument(
}
default: {
// We allow instantiating a template with template argument packs when
- // building deduction guides.
+ // building deduction guides or mapping constraint template parameters.
if (Arg.getKind() == TemplateArgument::Pack &&
- CodeSynthesisContexts.back().Kind ==
- Sema::CodeSynthesisContext::BuildingDeductionGuides) {
+ (CodeSynthesisContexts.back().Kind ==
+ Sema::CodeSynthesisContext::BuildingDeductionGuides ||
+ inParameterMappingSubstitution())) {
SugaredConverted.push_back(Arg);
CanonicalConverted.push_back(Arg);
return false;
@@ -5813,6 +5829,20 @@ bool Sema::CheckTemplateArgumentList(
TemplateArgumentListInfo &TemplateArgs, const DefaultArguments &DefaultArgs,
bool PartialTemplateArgs, CheckTemplateArgumentInfo &CTAI,
bool UpdateArgsWithConversions, bool *ConstraintsNotSatisfied) {
+ return CheckTemplateArgumentList(
+ Template, GetTemplateParameterList(Template), TemplateLoc, TemplateArgs,
+ DefaultArgs, PartialTemplateArgs, CTAI, UpdateArgsWithConversions,
+ ConstraintsNotSatisfied);
+}
+
+/// Check that the given template argument list is well-formed
+/// for specializing the given template.
+bool Sema::CheckTemplateArgumentList(
+ TemplateDecl *Template, TemplateParameterList *Params,
+ SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs,
+ const DefaultArguments &DefaultArgs, bool PartialTemplateArgs,
+ CheckTemplateArgumentInfo &CTAI, bool UpdateArgsWithConversions,
+ bool *ConstraintsNotSatisfied) {
if (ConstraintsNotSatisfied)
*ConstraintsNotSatisfied = false;
@@ -5822,8 +5852,6 @@ bool Sema::CheckTemplateArgumentList(
// template.
TemplateArgumentListInfo NewArgs = TemplateArgs;
- TemplateParameterList *Params = GetTemplateParameterList(Template);
-
SourceLocation RAngleLoc = NewArgs.getRAngleLoc();
// C++23 [temp.arg.general]p1:
@@ -6163,11 +6191,12 @@ bool Sema::CheckTemplateArgumentList(
CXXThisScopeRAII Scope(*this, RD, ThisQuals, RD != nullptr);
MultiLevelTemplateArgumentList MLTAL = getTemplateInstantiationArgs(
- Template, NewContext, /*Final=*/false, CTAI.CanonicalConverted,
+ Template, NewContext, /*Final=*/true, CTAI.SugaredConverted,
/*RelativeToPrimary=*/true,
/*Pattern=*/nullptr,
/*ForConceptInstantiation=*/true);
- if (EnsureTemplateArgumentListConstraints(
+ if (!isa<ConceptDecl>(Template) &&
+ EnsureTemplateArgumentListConstraints(
Template, MLTAL,
SourceRange(TemplateLoc, TemplateArgs.getRAngleLoc()))) {
if (ConstraintsNotSatisfied)
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index f6ee745..6bba505 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -3206,7 +3206,7 @@ CheckDeducedArgumentConstraints(Sema &S, NamedDecl *Template,
// If we don't need to replace the deduced template arguments,
// we can add them immediately as the inner-most argument list.
if (!DeducedArgsNeedReplacement)
- Innermost = CanonicalDeducedArgs;
+ Innermost = SugaredDeducedArgs;
MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
Template, Template->getDeclContext(), /*Final=*/false, Innermost,
@@ -3218,7 +3218,7 @@ CheckDeducedArgumentConstraints(Sema &S, NamedDecl *Template,
// not class-scope explicit specialization, so replace with Deduced Args
// instead of adding to inner-most.
if (!Innermost)
- MLTAL.replaceInnermostTemplateArguments(Template, CanonicalDeducedArgs);
+ MLTAL.replaceInnermostTemplateArguments(Template, SugaredDeducedArgs);
if (S.CheckConstraintSatisfaction(Template, AssociatedConstraints, MLTAL,
Info.getLocation(),
@@ -3995,11 +3995,12 @@ TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
if (CheckFunctionTemplateConstraints(
Info.getLocation(),
FunctionTemplate->getCanonicalDecl()->getTemplatedDecl(),
- CTAI.CanonicalConverted, Info.AssociatedConstraintsSatisfaction))
+ CTAI.SugaredConverted, Info.AssociatedConstraintsSatisfaction))
return TemplateDeductionResult::MiscellaneousDeductionFailure;
if (!Info.AssociatedConstraintsSatisfaction.IsSatisfied) {
- Info.reset(Info.takeSugared(), TemplateArgumentList::CreateCopy(
- Context, CTAI.CanonicalConverted));
+ Info.reset(
+ TemplateArgumentList::CreateCopy(Context, CTAI.SugaredConverted),
+ Info.takeCanonical());
return TemplateDeductionResult::ConstraintsNotSatisfied;
}
}
@@ -5167,8 +5168,8 @@ static bool CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
/*DefaultArgs=*/{},
/*PartialTemplateArgs=*/false, CTAI))
return true;
- MultiLevelTemplateArgumentList MLTAL(Concept, CTAI.CanonicalConverted,
- /*Final=*/false);
+ MultiLevelTemplateArgumentList MLTAL(Concept, CTAI.SugaredConverted,
+ /*Final=*/true);
// Build up an EvaluationContext with an ImplicitConceptSpecializationDecl so
// that the template arguments of the constraint can be preserved. For
// example:
@@ -5182,7 +5183,7 @@ static bool CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
S, Sema::ExpressionEvaluationContext::Unevaluated,
ImplicitConceptSpecializationDecl::Create(
S.getASTContext(), Concept->getDeclContext(), Concept->getLocation(),
- CTAI.CanonicalConverted));
+ CTAI.SugaredConverted));
if (S.CheckConstraintSatisfaction(
Concept, AssociatedConstraint(Concept->getConstraintExpr()), MLTAL,
TypeLoc.getLocalSourceRange(), Satisfaction))
@@ -6676,10 +6677,11 @@ namespace {
struct MarkUsedTemplateParameterVisitor : DynamicRecursiveASTVisitor {
llvm::SmallBitVector &Used;
unsigned Depth;
+ bool VisitDeclRefTypes = true;
- MarkUsedTemplateParameterVisitor(llvm::SmallBitVector &Used,
- unsigned Depth)
- : Used(Used), Depth(Depth) { }
+ MarkUsedTemplateParameterVisitor(llvm::SmallBitVector &Used, unsigned Depth,
+ bool VisitDeclRefTypes = true)
+ : Used(Used), Depth(Depth), VisitDeclRefTypes(VisitDeclRefTypes) {}
bool VisitTemplateTypeParmType(TemplateTypeParmType *T) override {
if (T->getDepth() == Depth)
@@ -6700,6 +6702,8 @@ struct MarkUsedTemplateParameterVisitor : DynamicRecursiveASTVisitor {
if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(E->getDecl()))
if (NTTP->getDepth() == Depth)
Used[NTTP->getIndex()] = true;
+ if (VisitDeclRefTypes)
+ DynamicRecursiveASTVisitor::TraverseType(E->getType());
return true;
}
@@ -7043,10 +7047,13 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
break;
case Type::UnaryTransform:
- if (!OnlyDeduced)
- MarkUsedTemplateParameters(Ctx,
- cast<UnaryTransformType>(T)->getUnderlyingType(),
- OnlyDeduced, Depth, Used);
+ if (!OnlyDeduced) {
+ auto *UTT = cast<UnaryTransformType>(T);
+ auto Next = UTT->getUnderlyingType();
+ if (Next.isNull())
+ Next = UTT->getBaseType();
+ MarkUsedTemplateParameters(Ctx, Next, OnlyDeduced, Depth, Used);
+ }
break;
case Type::PackExpansion:
@@ -7146,6 +7153,12 @@ Sema::MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
::MarkUsedTemplateParameters(Context, E, OnlyDeduced, Depth, Used);
}
+void Sema::MarkUsedTemplateParametersForSubsumptionParameterMapping(
+ const Expr *E, unsigned Depth, llvm::SmallBitVector &Used) {
+ MarkUsedTemplateParameterVisitor(Used, Depth, /*VisitDeclRefTypes=*/false)
+ .TraverseStmt(const_cast<Expr *>(E));
+}
+
void
Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced, unsigned Depth,
@@ -7171,6 +7184,14 @@ void Sema::MarkUsedTemplateParameters(ArrayRef<TemplateArgument> TemplateArgs,
/*OnlyDeduced=*/false, Depth, Used);
}
+void Sema::MarkUsedTemplateParameters(
+ ArrayRef<TemplateArgumentLoc> TemplateArgs, unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ ::MarkUsedTemplateParameters(Context, TemplateArgs[I].getArgument(),
+ /*OnlyDeduced=*/false, Depth, Used);
+}
+
void Sema::MarkDeducedTemplateParameters(
ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
diff --git a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
index 3d54d1e..9a61888 100644
--- a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
+++ b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
@@ -1171,17 +1171,46 @@ BuildDeductionGuideForTypeAlias(Sema &SemaRef,
Args.addOuterTemplateArguments(TransformedDeducedAliasArgs);
for (unsigned Index = 0; Index < DeduceResults.size(); ++Index) {
const auto &D = DeduceResults[Index];
+ auto *TP = F->getTemplateParameters()->getParam(Index);
if (IsNonDeducedArgument(D)) {
// 2): Non-deduced template parameters would be substituted later.
continue;
}
TemplateArgumentLoc Input =
SemaRef.getTrivialTemplateArgumentLoc(D, QualType(), SourceLocation{});
- TemplateArgumentLoc Output;
- if (!SemaRef.SubstTemplateArgument(Input, Args, Output)) {
- assert(TemplateArgsForBuildingFPrime[Index].isNull() &&
- "InstantiatedArgs must be null before setting");
- TemplateArgsForBuildingFPrime[Index] = Output.getArgument();
+ TemplateArgumentListInfo Output;
+ if (SemaRef.SubstTemplateArguments(Input, Args, Output))
+ return nullptr;
+ assert(TemplateArgsForBuildingFPrime[Index].isNull() &&
+ "InstantiatedArgs must be null before setting");
+ // CheckTemplateArgument is necessary for NTTP initializations.
+ // FIXME: We may want to call CheckTemplateArguments instead, but we cannot
+ // match packs as usual, since packs can appear in the middle of the
+ // parameter list of a synthesized CTAD guide. See also the FIXME in
+ // test/SemaCXX/cxx20-ctad-type-alias.cpp:test25.
+ Sema::CheckTemplateArgumentInfo CTAI;
+ if (Input.getArgument().getKind() == TemplateArgument::Pack) {
+ for (auto TA : Output.arguments()) {
+ if (SemaRef.CheckTemplateArgument(
+ TP, TA, F, F->getLocation(), F->getLocation(),
+ /*ArgumentPackIndex=*/-1, CTAI,
+ Sema::CheckTemplateArgumentKind::CTAK_Specified))
+ return nullptr;
+ }
+ // We will substitute the non-deduced template arguments with these
+ // transformed (by now unpacked) arguments; that substitution expects a
+ // pack for each corresponding parameter pack.
+ TemplateArgsForBuildingFPrime[Index] =
+ TemplateArgument::CreatePackCopy(Context, CTAI.SugaredConverted);
+ } else {
+ assert(Output.arguments().size() == 1);
+ TemplateArgumentLoc Transformed = Output.arguments()[0];
+ if (SemaRef.CheckTemplateArgument(
+ TP, Transformed, F, F->getLocation(), F->getLocation(),
+ /*ArgumentPackIndex=*/-1, CTAI,
+ Sema::CheckTemplateArgumentKind::CTAK_Specified))
+ return nullptr;
+ TemplateArgsForBuildingFPrime[Index] = CTAI.SugaredConverted[0];
}
}
@@ -1428,10 +1457,13 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
DeclareImplicitDeductionGuidesForTypeAlias(*this, AliasTemplate, Loc);
return;
}
- if (CXXRecordDecl *DefRecord =
- cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
+ CXXRecordDecl *DefRecord =
+ dyn_cast_or_null<CXXRecordDecl>(Template->getTemplatedDecl());
+ if (!DefRecord)
+ return;
+ if (const CXXRecordDecl *Definition = DefRecord->getDefinition()) {
if (TemplateDecl *DescribedTemplate =
- DefRecord->getDescribedClassTemplate())
+ Definition->getDescribedClassTemplate())
Template = DescribedTemplate;
}
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index f1c9c5c..1f762ca 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -628,9 +628,14 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
Inst.InstantiationRange = InstantiationRange;
Inst.InConstraintSubstitution =
Inst.Kind == CodeSynthesisContext::ConstraintSubstitution;
- if (!SemaRef.CodeSynthesisContexts.empty())
+ Inst.InParameterMappingSubstitution =
+ Inst.Kind == CodeSynthesisContext::ParameterMappingSubstitution;
+ if (!SemaRef.CodeSynthesisContexts.empty()) {
Inst.InConstraintSubstitution |=
SemaRef.CodeSynthesisContexts.back().InConstraintSubstitution;
+ Inst.InParameterMappingSubstitution |=
+ SemaRef.CodeSynthesisContexts.back().InParameterMappingSubstitution;
+ }
Invalid = SemaRef.pushCodeSynthesisContext(Inst);
if (!Invalid) {
@@ -1375,6 +1380,7 @@ std::optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
// Template Instantiation for Types
//===----------------------------------------------------------------------===/
namespace {
+
class TemplateInstantiator : public TreeTransform<TemplateInstantiator> {
const MultiLevelTemplateArgumentList &TemplateArgs;
SourceLocation Loc;
@@ -1387,7 +1393,11 @@ namespace {
// Whether an incomplete substitution should be treated as an error.
bool BailOutOnIncomplete;
- private:
+ // Whether to rebuild pack expansion types; we don't do that when
+ // rebuilding the parameter mapping of a fold expression appearing
+ // in a constraint expression.
+ bool BuildPackExpansionTypes = true;
+
// CWG2770: Function parameters should be instantiated when they are
// needed by a satisfaction check of an atomic constraint or
// (recursively) by another function parameter.
@@ -1410,6 +1420,17 @@ namespace {
return EvaluateConstraints;
}
+ inline static struct ForParameterMappingSubstitution_t {
+ } ForParameterMappingSubstitution;
+
+ TemplateInstantiator(ForParameterMappingSubstitution_t, Sema &SemaRef,
+ SourceLocation Loc,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool BuildPackExpansionTypes)
+ : inherited(SemaRef), TemplateArgs(TemplateArgs), Loc(Loc),
+ BailOutOnIncomplete(false),
+ BuildPackExpansionTypes(BuildPackExpansionTypes) {}
+
/// Determine whether the given type \p T has already been
/// transformed.
///
@@ -1444,7 +1465,8 @@ namespace {
bool &ShouldExpand, bool &RetainExpansion,
UnsignedOrNone &NumExpansions) {
if (SemaRef.CurrentInstantiationScope &&
- SemaRef.inConstraintSubstitution()) {
+ (SemaRef.inConstraintSubstitution() ||
+ SemaRef.inParameterMappingSubstitution())) {
for (UnexpandedParameterPack ParmPack : Unexpanded) {
NamedDecl *VD = ParmPack.first.dyn_cast<NamedDecl *>();
if (auto *PVD = dyn_cast_if_present<ParmVarDecl>(VD);
@@ -1465,10 +1487,10 @@ namespace {
TemplateArgument ForgetPartiallySubstitutedPack() {
TemplateArgument Result;
- if (NamedDecl *PartialPack
- = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
- MultiLevelTemplateArgumentList &TemplateArgs
- = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ if (NamedDecl *PartialPack = SemaRef.CurrentInstantiationScope
+ ->getPartiallySubstitutedPack()) {
+ MultiLevelTemplateArgumentList &TemplateArgs =
+ const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
unsigned Depth, Index;
std::tie(Depth, Index) = getDepthAndIndex(PartialPack);
if (TemplateArgs.hasTemplateArgument(Depth, Index)) {
@@ -1488,10 +1510,10 @@ namespace {
if (Arg.isNull())
return;
- if (NamedDecl *PartialPack
- = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
- MultiLevelTemplateArgumentList &TemplateArgs
- = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ if (NamedDecl *PartialPack = SemaRef.CurrentInstantiationScope
+ ->getPartiallySubstitutedPack()) {
+ MultiLevelTemplateArgumentList &TemplateArgs =
+ const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
unsigned Depth, Index;
std::tie(Depth, Index) = getDepthAndIndex(PartialPack);
TemplateArgs.setArgument(Depth, Index, Arg);
@@ -1508,9 +1530,9 @@ namespace {
std::move(New);
return Old;
}
+
void RememberSubstitution(MultiLevelTemplateArgumentList Old) {
- const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs) =
- std::move(Old);
+ const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs) = Old;
}
TemplateArgument
@@ -1691,6 +1713,24 @@ namespace {
return inherited::TransformTemplateArgument(Input, Output, Uneval);
}
+ // Redeclare this forwarding overload so the TemplateArgumentLoc overload
+ // below does not hide the inherited Expr-based RebuildPackExpansion.
+ ExprResult RebuildPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ UnsignedOrNone NumExpansions) {
+ return inherited::RebuildPackExpansion(Pattern, EllipsisLoc,
+ NumExpansions);
+ }
+
+ TemplateArgumentLoc RebuildPackExpansion(TemplateArgumentLoc Pattern,
+ SourceLocation EllipsisLoc,
+ UnsignedOrNone NumExpansions) {
+ // We don't rewrite a PackExpansion type when we want to normalize a
+ // CXXFoldExpr constraint. We'll expand it when evaluating the constraint.
+ if (BuildPackExpansionTypes)
+ return inherited::RebuildPackExpansion(Pattern, EllipsisLoc,
+ NumExpansions);
+ return Pattern;
+ }
+
using TreeTransform::TransformTemplateSpecializationType;
QualType
TransformTemplateSpecializationType(TypeLocBuilder &TLB,
@@ -1961,7 +2001,8 @@ Decl *TemplateInstantiator::TransformDecl(SourceLocation Loc, Decl *D) {
if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(D);
PVD && SemaRef.CurrentInstantiationScope &&
- SemaRef.inConstraintSubstitution() &&
+ (SemaRef.inConstraintSubstitution() ||
+ SemaRef.inParameterMappingSubstitution()) &&
maybeInstantiateFunctionParameterToScope(PVD))
return nullptr;
@@ -2759,18 +2800,29 @@ TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
concepts::NestedRequirement *
TemplateInstantiator::TransformNestedRequirement(
concepts::NestedRequirement *Req) {
- if (!Req->isDependent() && !AlwaysRebuild())
- return Req;
+
+ ASTContext &C = SemaRef.Context;
+
+ Expr *Constraint = Req->getConstraintExpr();
+ ConstraintSatisfaction Satisfaction;
+
+ auto NestedReqWithDiag = [&C, this](Expr *E,
+ ConstraintSatisfaction Satisfaction) {
+ Satisfaction.IsSatisfied = false;
+ SmallString<128> Entity;
+ llvm::raw_svector_ostream OS(Entity);
+ E->printPretty(OS, nullptr, SemaRef.getPrintingPolicy());
+ return new (C) concepts::NestedRequirement(
+ SemaRef.Context, C.backupStr(Entity), std::move(Satisfaction));
+ };
+
if (Req->hasInvalidConstraint()) {
if (AlwaysRebuild())
return RebuildNestedRequirement(Req->getInvalidConstraintEntity(),
Req->getConstraintSatisfaction());
return Req;
}
- Sema::InstantiatingTemplate ReqInst(SemaRef,
- Req->getConstraintExpr()->getBeginLoc(), Req,
- Sema::InstantiatingTemplate::ConstraintsCheck{},
- Req->getConstraintExpr()->getSourceRange());
+
if (!getEvaluateConstraints()) {
ExprResult TransConstraint = TransformExpr(Req->getConstraintExpr());
if (TransConstraint.isInvalid() || !TransConstraint.get())
@@ -2783,45 +2835,45 @@ TemplateInstantiator::TransformNestedRequirement(
SemaRef.Context, TransConstraint.get(), Satisfaction);
}
- ExprResult TransConstraint;
- ConstraintSatisfaction Satisfaction;
- TemplateDeductionInfo Info(Req->getConstraintExpr()->getBeginLoc());
+ bool Success;
+ Expr *NewConstraint;
+ TemplateDeductionInfo Info(Constraint->getBeginLoc());
{
EnterExpressionEvaluationContext ContextRAII(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- Sema::SFINAETrap Trap(SemaRef);
- Sema::InstantiatingTemplate ConstrInst(SemaRef,
- Req->getConstraintExpr()->getBeginLoc(), Req, Info,
- Req->getConstraintExpr()->getSourceRange());
+
+ Sema::InstantiatingTemplate ConstrInst(
+ SemaRef, Constraint->getBeginLoc(), Req,
+ Sema::InstantiatingTemplate::ConstraintsCheck(),
+ Constraint->getSourceRange());
+
if (ConstrInst.isInvalid())
return nullptr;
- llvm::SmallVector<Expr *> Result;
- if (!SemaRef.CheckConstraintSatisfaction(
- nullptr,
- AssociatedConstraint(Req->getConstraintExpr(),
- SemaRef.ArgPackSubstIndex),
- Result, TemplateArgs, Req->getConstraintExpr()->getSourceRange(),
- Satisfaction) &&
- !Result.empty())
- TransConstraint = Result[0];
- assert(!Trap.hasErrorOccurred() && "Substitution failures must be handled "
- "by CheckConstraintSatisfaction.");
+
+ Sema::SFINAETrap Trap(SemaRef);
+
+ Success = !SemaRef.CheckConstraintSatisfaction(
+ Req, AssociatedConstraint(Constraint, SemaRef.ArgPackSubstIndex),
+ TemplateArgs, Constraint->getSourceRange(), Satisfaction,
+ /*TopLevelConceptId=*/nullptr, &NewConstraint);
+
+ assert((!Success || !Trap.hasErrorOccurred()) &&
+ "Substitution failures must be handled "
+ "by CheckConstraintSatisfaction.");
}
- ASTContext &C = SemaRef.Context;
- if (TransConstraint.isUsable() &&
- TransConstraint.get()->isInstantiationDependent())
- return new (C) concepts::NestedRequirement(TransConstraint.get());
- if (TransConstraint.isInvalid() || !TransConstraint.get() ||
- Satisfaction.HasSubstitutionFailure()) {
- SmallString<128> Entity;
- llvm::raw_svector_ostream OS(Entity);
- Req->getConstraintExpr()->printPretty(OS, nullptr,
- SemaRef.getPrintingPolicy());
- return new (C) concepts::NestedRequirement(
- SemaRef.Context, C.backupStr(Entity), Satisfaction);
+
+ if (!Success || Satisfaction.HasSubstitutionFailure())
+ return NestedReqWithDiag(Constraint, Satisfaction);
+
+ // FIXME: const correctness
+ // MLTAL might be dependent.
+ if (!NewConstraint) {
+ if (!Satisfaction.IsSatisfied)
+ return NestedReqWithDiag(Constraint, Satisfaction);
+
+ NewConstraint = Constraint;
}
- return new (C)
- concepts::NestedRequirement(C, TransConstraint.get(), Satisfaction);
+ return new (C) concepts::NestedRequirement(C, NewConstraint, Satisfaction);
}
TypeSourceInfo *Sema::SubstType(TypeSourceInfo *T,
@@ -3078,7 +3130,7 @@ bool Sema::SubstTypeConstraint(
const ASTTemplateArgumentListInfo *TemplArgInfo =
TC->getTemplateArgsAsWritten();
- if (!EvaluateConstraints) {
+ if (!EvaluateConstraints && !inParameterMappingSubstitution()) {
UnsignedOrNone Index = TC->getArgPackSubstIndex();
if (!Index)
Index = SemaRef.ArgPackSubstIndex;
@@ -4378,6 +4430,16 @@ bool Sema::SubstTemplateArguments(
return Instantiator.TransformTemplateArguments(Args.begin(), Args.end(), Out);
}
+bool Sema::SubstTemplateArgumentsInParameterMapping(
+ ArrayRef<TemplateArgumentLoc> Args, SourceLocation BaseLoc,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateArgumentListInfo &Out, bool BuildPackExpansionTypes) {
+ TemplateInstantiator Instantiator(
+ TemplateInstantiator::ForParameterMappingSubstitution, *this, BaseLoc,
+ TemplateArgs, BuildPackExpansionTypes);
+ return Instantiator.TransformTemplateArguments(Args.begin(), Args.end(), Out);
+}
+
ExprResult
Sema::SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs) {
if (!E)
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 6967301..51b55b8 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -3722,10 +3722,6 @@ public:
ParentContext);
}
- /// Build a new Objective-C boxed expression.
- ///
- /// By default, performs semantic analysis to build the new expression.
- /// Subclasses may override this routine to provide different behavior.
ExprResult RebuildConceptSpecializationExpr(NestedNameSpecifierLoc NNS,
SourceLocation TemplateKWLoc, DeclarationNameInfo ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
@@ -5110,9 +5106,13 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
typedef TemplateArgumentLocInventIterator<Derived,
TemplateArgument::pack_iterator>
PackLocIterator;
+
+ TemplateArgumentListInfo *PackOutput = &Outputs;
+ TemplateArgumentListInfo New;
+
if (TransformTemplateArguments(
PackLocIterator(*this, In.getArgument().pack_begin()),
- PackLocIterator(*this, In.getArgument().pack_end()), Outputs,
+ PackLocIterator(*this, In.getArgument().pack_end()), *PackOutput,
Uneval))
return true;
@@ -5179,7 +5179,6 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
}
return false;
-
}
// FIXME: Find ways to reduce code duplication for pack expansions.
@@ -6247,7 +6246,7 @@ ParmVarDecl *TreeTransform<Derived>::TransformFunctionTypeParam(
/* DefArg */ nullptr);
newParm->setScopeInfo(OldParm->getFunctionScopeDepth(),
OldParm->getFunctionScopeIndex() + indexAdjustment);
- transformedLocalDecl(OldParm, {newParm});
+ getDerived().transformedLocalDecl(OldParm, {newParm});
return newParm;
}
@@ -7082,11 +7081,11 @@ QualType TreeTransform<Derived>::TransformUnaryTransformType(
TypeLocBuilder &TLB,
UnaryTransformTypeLoc TL) {
QualType Result = TL.getType();
+ TypeSourceInfo *NewBaseTSI = TL.getUnderlyingTInfo();
if (Result->isDependentType()) {
const UnaryTransformType *T = TL.getTypePtr();
- TypeSourceInfo *NewBaseTSI =
- getDerived().TransformType(TL.getUnderlyingTInfo());
+ NewBaseTSI = getDerived().TransformType(TL.getUnderlyingTInfo());
if (!NewBaseTSI)
return QualType();
QualType NewBase = NewBaseTSI->getType();
@@ -7101,7 +7100,7 @@ QualType TreeTransform<Derived>::TransformUnaryTransformType(
UnaryTransformTypeLoc NewTL = TLB.push<UnaryTransformTypeLoc>(Result);
NewTL.setKWLoc(TL.getKWLoc());
NewTL.setParensRange(TL.getParensRange());
- NewTL.setUnderlyingTInfo(TL.getUnderlyingTInfo());
+ NewTL.setUnderlyingTInfo(NewBaseTSI);
return Result;
}
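
Illustration (using clang builtins): the rebuilt TypeLoc now carries the transformed underlying TypeSourceInfo, so consumers walking the TypeLoc of an instantiated __underlying_type(T) see the substituted type rather than the uninstantiated 'T'.

enum class E : short { A };

template <class T>
using Under = __underlying_type(T);

static_assert(__is_same(Under<E>, short), "");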
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index c05e428..6acf79a 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -12860,10 +12860,9 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
llvm::SmallVector<OpenACCPrivateRecipe> RecipeList;
for (unsigned I = 0; I < VarList.size(); ++I) {
- static_assert(sizeof(OpenACCPrivateRecipe) == 2 * sizeof(int *));
+ static_assert(sizeof(OpenACCPrivateRecipe) == 1 * sizeof(int *));
VarDecl *Alloca = readDeclAs<VarDecl>();
- Expr *InitExpr = readSubExpr();
- RecipeList.push_back({Alloca, InitExpr});
+ RecipeList.push_back({Alloca});
}
return OpenACCPrivateClause::Create(getContext(), BeginLoc, LParenLoc,
@@ -12886,11 +12885,10 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
llvm::SmallVector<OpenACCFirstPrivateRecipe> RecipeList;
for (unsigned I = 0; I < VarList.size(); ++I) {
- static_assert(sizeof(OpenACCFirstPrivateRecipe) == 3 * sizeof(int *));
+ static_assert(sizeof(OpenACCFirstPrivateRecipe) == 2 * sizeof(int *));
VarDecl *Recipe = readDeclAs<VarDecl>();
- Expr *InitExpr = readSubExpr();
VarDecl *RecipeTemp = readDeclAs<VarDecl>();
- RecipeList.push_back({Recipe, InitExpr, RecipeTemp});
+ RecipeList.push_back({Recipe, RecipeTemp});
}
return OpenACCFirstPrivateClause::Create(getContext(), BeginLoc, LParenLoc,
@@ -13011,10 +13009,9 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
llvm::SmallVector<OpenACCReductionRecipe> RecipeList;
for (unsigned I = 0; I < VarList.size(); ++I) {
- static_assert(sizeof(OpenACCReductionRecipe) == 2 * sizeof(int *));
+ static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *));
VarDecl *Recipe = readDeclAs<VarDecl>();
- Expr *InitExpr = readSubExpr();
- RecipeList.push_back({Recipe, InitExpr});
+ RecipeList.push_back({Recipe});
}
return OpenACCReductionClause::Create(getContext(), BeginLoc, LParenLoc, Op,
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index cf32d4f..5456e73 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -2424,7 +2424,7 @@ void ASTDeclReader::VisitImplicitConceptSpecializationDecl(
VisitDecl(D);
llvm::SmallVector<TemplateArgument, 4> Args;
for (unsigned I = 0; I < D->NumTemplateArgs; ++I)
- Args.push_back(Record.readTemplateArgument(/*Canonicalize=*/true));
+ Args.push_back(Record.readTemplateArgument(/*Canonicalize=*/false));
D->setTemplateArguments(Args);
}
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index 70b898a..eef97a8 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -807,15 +807,19 @@ readConstraintSatisfaction(ASTRecordReader &Record) {
if (!Satisfaction.IsSatisfied) {
unsigned NumDetailRecords = Record.readInt();
for (unsigned i = 0; i != NumDetailRecords; ++i) {
- if (/* IsDiagnostic */Record.readInt()) {
+ auto Kind = Record.readInt();
+ if (Kind == 0) {
SourceLocation DiagLocation = Record.readSourceLocation();
StringRef DiagMessage = C.backupStr(Record.readString());
- Satisfaction.Details.emplace_back(
- new (C) ConstraintSatisfaction::SubstitutionDiagnostic(
- DiagLocation, DiagMessage));
- } else
+ Satisfaction.Details.emplace_back(new (
+ C) ConstraintSubstitutionDiagnostic(DiagLocation, DiagMessage));
+ } else if (Kind == 1) {
Satisfaction.Details.emplace_back(Record.readExpr());
+ } else {
+ assert(Kind == 2);
+ Satisfaction.Details.emplace_back(Record.readConceptReference());
+ }
}
}
return Satisfaction;
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index cdf95ba..09b1e58 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -8779,9 +8779,8 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
writeOpenACCVarList(PC);
for (const OpenACCPrivateRecipe &R : PC->getInitRecipes()) {
- static_assert(sizeof(R) == 2 * sizeof(int *));
+ static_assert(sizeof(R) == 1 * sizeof(int *));
AddDeclRef(R.AllocaDecl);
- AddStmt(const_cast<Expr *>(R.InitExpr));
}
return;
}
@@ -8803,9 +8802,8 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
writeOpenACCVarList(FPC);
for (const OpenACCFirstPrivateRecipe &R : FPC->getInitRecipes()) {
- static_assert(sizeof(R) == 3 * sizeof(int *));
+ static_assert(sizeof(R) == 2 * sizeof(int *));
AddDeclRef(R.AllocaDecl);
- AddStmt(const_cast<Expr *>(R.InitExpr));
AddDeclRef(R.InitFromTemporary);
}
return;
@@ -8927,9 +8925,8 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
writeOpenACCVarList(RC);
for (const OpenACCReductionRecipe &R : RC->getRecipes()) {
- static_assert(sizeof(OpenACCReductionRecipe) == 2 * sizeof(int *));
+ static_assert(sizeof(OpenACCReductionRecipe) == 1 * sizeof(int *));
AddDeclRef(R.AllocaDecl);
- AddStmt(const_cast<Expr *>(R.InitExpr));
}
return;
}
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index ebda91e..acf3453 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -482,14 +482,20 @@ addConstraintSatisfaction(ASTRecordWriter &Record,
if (!Satisfaction.IsSatisfied) {
Record.push_back(Satisfaction.NumRecords);
for (const auto &DetailRecord : Satisfaction) {
- auto *E = dyn_cast<Expr *>(DetailRecord);
- Record.push_back(/* IsDiagnostic */ E == nullptr);
- if (E)
- Record.AddStmt(E);
- else {
- auto *Diag = cast<std::pair<SourceLocation, StringRef> *>(DetailRecord);
+ if (auto *Diag = dyn_cast<const ConstraintSubstitutionDiagnostic *>(
+ DetailRecord)) {
+ Record.push_back(/*Kind=*/0);
Record.AddSourceLocation(Diag->first);
Record.AddString(Diag->second);
+ continue;
+ }
+ if (auto *E = dyn_cast<const Expr *>(DetailRecord)) {
+ Record.push_back(/*Kind=*/1);
+ Record.AddStmt(const_cast<Expr *>(E));
+ } else {
+ Record.push_back(/*Kind=*/2);
+ auto *CR = cast<const ConceptReference *>(DetailRecord);
+ Record.AddConceptReference(CR);
}
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp
index e1f9a77..955b8d1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp
@@ -385,6 +385,10 @@ public:
if (RTC.isUnretained(RetValue->getType()))
return;
}
+ if (retainsRet && *retainsRet) {
+ CreateOrCopyFnCall.insert(RetValue);
+ return;
+ }
if (auto *CE = dyn_cast<CallExpr>(RetValue)) {
auto *Callee = CE->getDirectCallee();
if (!Callee || !isCreateOrCopyFunction(Callee))
diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 180056c..06ba015 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -1254,6 +1254,15 @@ template <> struct DenseMapInfo<PrivateMethodKey> {
};
} // end namespace llvm
+// NOTE: This cache is a "global" variable, and it is cleared by
+// CallEventManager's constructor so we do not keep old entries when
+// loading/unloading ASTs. If we are worried about concurrency, we may need to
+// revisit this someday. In terms of memory, this table stays around until clang
+// quits, which also may be bad if we need to release memory.
+using PrivateMethodCacheTy =
+ llvm::DenseMap<PrivateMethodKey, std::optional<const ObjCMethodDecl *>>;
+static PrivateMethodCacheTy PrivateMethodCache;
+
static const ObjCMethodDecl *
lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
Selector LookupSelector, bool InstanceMethod) {
@@ -1262,21 +1271,8 @@ lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
// that repeated queries on the same ObjCInterfaceDecl and Selector
// don't incur the same cost. On some test cases, we can see the
// same query being issued thousands of times.
- //
- // NOTE: This cache is essentially a "global" variable, but it
- // only gets lazily created when we get here. The value of the
- // cache probably comes from it being global across ExprEngines,
- // where the same queries may get issued. If we are worried about
- // concurrency, or possibly loading/unloading ASTs, etc., we may
- // need to revisit this someday. In terms of memory, this table
- // stays around until clang quits, which also may be bad if we
- // need to release memory.
- using PrivateMethodCache =
- llvm::DenseMap<PrivateMethodKey, std::optional<const ObjCMethodDecl *>>;
-
- static PrivateMethodCache PMC;
std::optional<const ObjCMethodDecl *> &Val =
- PMC[{Interface, LookupSelector, InstanceMethod}];
+ PrivateMethodCache[{Interface, LookupSelector, InstanceMethod}];
// Query lookupPrivateMethod() if the cache does not hit.
if (!Val) {
@@ -1422,6 +1418,13 @@ void ObjCMethodCall::getInitialStackFrameContents(
}
}
+CallEventManager::CallEventManager(llvm::BumpPtrAllocator &alloc)
+ : Alloc(alloc) {
+ // Clear the method cache to avoid hits when multiple ASTs are loaded/unloaded
+ // within a single process. This can happen with unit tests, for instance.
+ PrivateMethodCache.clear();
+}
+
CallEventRef<>
CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
const LocationContext *LCtx,
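
The two hunks above hoist the memoization table out of lookupRuntimeDefinition() into a file-scope variable so the new CallEventManager constructor can clear it. The caching idiom, reduced to a self-contained sketch with stand-in types (the real table keys on interface, selector, and instance-method flag, and caches ObjCMethodDecl pointers):

    #include <map>
    #include <optional>

    const char *expensiveLookup(int Key); // hypothetical stand-in for lookupPrivateMethod

    static std::map<int, std::optional<const char *>> Cache; // stand-in for the DenseMap

    const char *lookup(int Key) {
      std::optional<const char *> &Slot = Cache[Key]; // default-inserts an empty optional
      if (!Slot)                     // engaged means "already computed"; the cached
        Slot = expensiveLookup(Key); // pointer itself may still be null ("not found")
      return *Slot;
    }
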
diff --git a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 8e9d6fe..af0ef52 100644
--- a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -2658,14 +2658,20 @@ RegionStoreManager::bindArray(LimitedRegionBindingsConstRef B,
return bindAggregate(B, R, V);
}
- // Handle lazy compound values.
+ // FIXME: A single-value constant should have been handled before this call
+ // to bindArray. This is only a hotfix to avoid a crash.
+ if (Init.isConstant())
+ return bindAggregate(B, R, Init);
+
if (std::optional LCV = Init.getAs<nonloc::LazyCompoundVal>()) {
if (std::optional NewB = tryBindSmallArray(B, R, AT, *LCV))
return *NewB;
-
return bindAggregate(B, R, Init);
}
+ if (isa<nonloc::SymbolVal>(Init))
+ return bindAggregate(B, R, Init);
+
if (Init.isUnknown())
return bindAggregate(B, R, UnknownVal());
diff --git a/clang/lib/StaticAnalyzer/Core/Store.cpp b/clang/lib/StaticAnalyzer/Core/Store.cpp
index 971e6bc..b609f36 100644
--- a/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -210,7 +210,7 @@ std::optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
// Is the offset a multiple of the size? If so, we can layer the
// ElementRegion (with elementType == PointeeTy) directly on top of
// the base region.
- if (off % pointeeTySize == 0) {
+ if (off.isMultipleOf(pointeeTySize)) {
newIndex = off / pointeeTySize;
newSuperR = baseR;
}
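
The predicate swap above is behavior-preserving: CharUnits::isMultipleOf (consistent with the CharUnits.h change listed in this commit) states the divisibility test directly instead of comparing a remainder against zero. A minimal equivalence sketch:

    #include "clang/AST/CharUnits.h"

    bool divides(clang::CharUnits Off, clang::CharUnits Size) {
      // Same result as the old `Off % Size == 0` for a nonzero Size,
      // but reads as a divisibility check rather than remainder arithmetic.
      return Off.isMultipleOf(Size);
    }
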
diff --git a/clang/lib/Testing/TestAST.cpp b/clang/lib/Testing/TestAST.cpp
index 9ad0de9..d333895 100644
--- a/clang/lib/Testing/TestAST.cpp
+++ b/clang/lib/Testing/TestAST.cpp
@@ -61,7 +61,7 @@ void createMissingComponents(CompilerInstance &Clang) {
if (!Clang.hasFileManager())
Clang.createFileManager();
if (!Clang.hasSourceManager())
- Clang.createSourceManager(Clang.getFileManager());
+ Clang.createSourceManager();
if (!Clang.hasTarget())
Clang.createTarget();
if (!Clang.hasPreprocessor())
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
index d370bfd..010380d 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
@@ -31,7 +31,7 @@ public:
for (const auto &File : getDependencies()) {
CanonPath = File;
llvm::sys::path::remove_dots(CanonPath, /*remove_dot_dot=*/true);
- llvm::sys::fs::make_absolute(WorkingDirectory, CanonPath);
+ llvm::sys::path::make_absolute(WorkingDirectory, CanonPath);
C.handleFileDependency(CanonPath);
}
}
@@ -415,7 +415,7 @@ bool DependencyScanningAction::runInvocation(
any(Service.getOptimizeArgs() & ScanningOptimizations::VFS);
// Create a new FileManager to match the invocation's FileSystemOptions.
- auto *FileMgr = ScanInstance.createFileManager();
+ ScanInstance.createFileManager();
// Use the dependency scanning optimized file system if requested to do so.
if (DepFS) {
@@ -423,16 +423,17 @@ bool DependencyScanningAction::runInvocation(
if (!ScanInstance.getHeaderSearchOpts().ModuleCachePath.empty()) {
SmallString<256> ModulesCachePath;
normalizeModuleCachePath(
- *FileMgr, ScanInstance.getHeaderSearchOpts().ModuleCachePath,
- ModulesCachePath);
+ ScanInstance.getFileManager(),
+ ScanInstance.getHeaderSearchOpts().ModuleCachePath, ModulesCachePath);
DepFS->setBypassedPathPrefix(ModulesCachePath);
}
ScanInstance.setDependencyDirectivesGetter(
- std::make_unique<ScanningDependencyDirectivesGetter>(*FileMgr));
+ std::make_unique<ScanningDependencyDirectivesGetter>(
+ ScanInstance.getFileManager()));
}
- ScanInstance.createSourceManager(*FileMgr);
+ ScanInstance.createSourceManager();
// Create a collection of stable directories derived from the ScanInstance
// for determining whether module dependencies would fully resolve from
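
The first hunk in this file moves make_absolute from the llvm::sys::fs namespace to llvm::sys::path, presumably because the operation is purely lexical (prepend a given working directory). A small usage sketch of the combination used above:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"

    void canonicalize(llvm::StringRef WorkingDir, llvm::StringRef File,
                      llvm::SmallString<256> &Out) {
      Out = File;
      llvm::sys::path::remove_dots(Out, /*remove_dot_dot=*/true);
      llvm::sys::path::make_absolute(WorkingDir, Out);
    }
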
diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp
index 2d4790b..ea5a372 100644
--- a/clang/lib/Tooling/Tooling.cpp
+++ b/clang/lib/Tooling/Tooling.cpp
@@ -458,7 +458,7 @@ bool FrontendActionFactory::runInvocation(
if (!Compiler.hasDiagnostics())
return false;
- Compiler.createSourceManager(*Files);
+ Compiler.createSourceManager();
const bool Success = Compiler.ExecuteAction(*ScopedToolAction);
diff --git a/clang/test/AST/ByteCode/const-eval.c b/clang/test/AST/ByteCode/const-eval.c
index c6b51d1..d6cf600 100644
--- a/clang/test/AST/ByteCode/const-eval.c
+++ b/clang/test/AST/ByteCode/const-eval.c
@@ -144,7 +144,7 @@ EVAL_EXPR(52, &pr24622 == (void *)&PR24622);
// We evaluate these by providing two's complement semantics in constant
// expressions, like we do for integers.
-void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // both-warning {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2305843009213693952 elements)}}
+void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // both-warning {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2'305'843'009'213'693'952 elements)}}
void *PR28739b = &PR28739b + (__int128)(unsigned long)-1; // both-warning {{refers past the last possible element}}
__int128 PR28739c = (&PR28739c + (__int128)(unsigned long)-1) - &PR28739c; // both-warning {{refers past the last possible element}}
diff --git a/clang/test/AST/ByteCode/literals.cpp b/clang/test/AST/ByteCode/literals.cpp
index 5bc3f7f..5028ebf 100644
--- a/clang/test/AST/ByteCode/literals.cpp
+++ b/clang/test/AST/ByteCode/literals.cpp
@@ -28,6 +28,8 @@ static_assert(number != 10, ""); // both-error{{failed}} \
static_assert(__objc_yes, "");
static_assert(!__objc_no, "");
+static_assert((long long)0x00000000FFFF0000 == 4294901760, "");
+
constexpr bool b = number;
static_assert(b, "");
constexpr int one = true;
diff --git a/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl b/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl
index 91441e3..129ab70 100644
--- a/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl
+++ b/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl
@@ -1,9 +1,15 @@
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \
+// RUN: -fdx-rootsignature-version=rootsig_1_0 \
+// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_0
+
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \
+// RUN: -fdx-rootsignature-version=rootsig_1_1 \
// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_1
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \
-// RUN: -fdx-rootsignature-version=rootsig_1_0 \
-// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_0
+// RUN: -fdx-rootsignature-version=rootsig_1_2 \
+// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_2
+
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \
// RUN: -D CmdRS='"UAV(u0)"'\
@@ -12,11 +18,13 @@
// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[ENTRY_RS_DECL:__hlsl_rootsig_decl_\d*]]
// CHECK-V1_0-SAME: version: 1.0,
// CHECK-V1_1-SAME: version: 1.1,
+// CHECK-V1_2-SAME: version: 1.2,
// CHECK-SAME: RootElements{
// CHECK-SAME: RootCBV(b0,
// CHECK-SAME: space = 0, visibility = All,
// CHECK-V1_0-SAME: flags = DataVolatile
// CHECK-V1_1-SAME: flags = DataStaticWhileSetAtExecute
+// CHECK-V1_2-SAME: flags = DataStaticWhileSetAtExecute
// CHECK-SAME: )
// CHECK-SAME: }
#define EntryRootSig "CBV(b0)"
diff --git a/clang/test/AST/HLSL/RootSignatures-AST.hlsl b/clang/test/AST/HLSL/RootSignatures-AST.hlsl
index 32da1f1..0f0f3a5 100644
--- a/clang/test/AST/HLSL/RootSignatures-AST.hlsl
+++ b/clang/test/AST/HLSL/RootSignatures-AST.hlsl
@@ -6,6 +6,9 @@
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \
// RUN: -fdx-rootsignature-version=rootsig_1_1 \
// RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_1
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \
+// RUN: -fdx-rootsignature-version=rootsig_1_2 \
+// RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_2
// This test ensures that the sample root signature is parsed without error and
// the Attr AST Node is created successfully. If an invalid root signature was
@@ -31,6 +34,7 @@
// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[SAMPLE_RS_DECL:__hlsl_rootsig_decl_\d*]]
// CHECK-V1_0: version: 1.0,
// CHECK-V1_1: version: 1.1,
+// CHECK-V1_2: version: 1.2,
// CHECK-SAME: RootElements{
// CHECK-SAME: RootFlags(AllowInputAssemblerInputLayout | DenyVertexShaderRootAccess),
// CHECK-SAME: RootCBV(b0,
@@ -62,6 +66,7 @@
// CHECK-SAME: s0, numDescriptors = 4, space = 1, offset = DescriptorTableOffsetAppend,
// CHECK-V1_0-SAME: flags = DescriptorsVolatile
// CHECK-V1_1-SAME: flags = None
+// CHECK-V1_2-SAME: flags = None
// CHECK-SAME: ),
// CHECK-SAME: DescriptorTable(
// CHECK-SAME: numClauses = 1, visibility = All
@@ -73,6 +78,7 @@
// CHECK-SAME: s1, filter = Anisotropic, addressU = Wrap, addressV = Wrap, addressW = Wrap,
// CHECK-SAME: mipLODBias = 0.000000e+00, maxAnisotropy = 16, comparisonFunc = LessEqual,
// CHECK-SAME: borderColor = OpaqueWhite, minLOD = 0.000000e+00, maxLOD = 3.402823e+38, space = 0, visibility = All
+// CHECK-SAME: flags = None
// CHECK-SAME: )}
// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[SAMPLE_RS_DECL]]
@@ -131,3 +137,24 @@ void same_rs_string_main() {}
// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[DIFF_RS_DECL]]
[RootSignature(SampleDifferentRS)]
void different_rs_string_main() {}
+
+#define SampleStaticSamplerRS \
+ "StaticSampler(s0, flags = NON_NORMALIZED_COORDINATES)"
+
+// Ensure that static sampler flags are correctly parsed in different versions
+
+// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[DIFF_RS_DECL:__hlsl_rootsig_decl_\d*]]
+// CHECK-V1_0: version: 1.0,
+// CHECK-V1_1: version: 1.1,
+// CHECK-V1_2: version: 1.2,
+// CHECK-SAME: RootElements{
+// CHECK-SAME: StaticSampler(
+// CHECK-SAME: s0, filter = Anisotropic, addressU = Wrap, addressV = Wrap, addressW = Wrap,
+// CHECK-SAME: mipLODBias = 0.000000e+00, maxAnisotropy = 16, comparisonFunc = LessEqual,
+// CHECK-SAME: borderColor = OpaqueWhite, minLOD = 0.000000e+00, maxLOD = 3.402823e+38, space = 0, visibility = All
+// CHECK-SAME: flags = NonNormalizedCoordinates
+// CHECK-SAME: )}
+
+// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[DIFF_RS_DECL]]
+[RootSignature(SampleStaticSamplerRS)]
+void static_sampler_v12_main() {}
diff --git a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
index a490b22..6779abb 100644
--- a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
+++ b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl
@@ -12,7 +12,7 @@
//
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump \
// RUN: -DRESOURCE=RWStructuredBuffer %s | FileCheck -DRESOURCE=RWStructuredBuffer \
-// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-SUBSCRIPT,CHECK-SUBSCRIPT-UAV,CHECK-COUNTER,CHECK-LOAD %s
+// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-SUBSCRIPT,CHECK-SUBSCRIPT-UAV,CHECK-COUNTER,CHECK-LOAD,CHECK-COUNTER-HANDLE %s
//
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump -DEMPTY \
// RUN: -DRESOURCE=AppendStructuredBuffer %s | FileCheck -DRESOURCE=AppendStructuredBuffer \
@@ -20,7 +20,7 @@
//
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump \
// RUN: -DRESOURCE=AppendStructuredBuffer %s | FileCheck -DRESOURCE=AppendStructuredBuffer \
-// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-NOSUBSCRIPT,CHECK-APPEND %s
+// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-NOSUBSCRIPT,CHECK-APPEND,CHECK-COUNTER-HANDLE %s
//
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump -DEMPTY \
// RUN: -DRESOURCE=ConsumeStructuredBuffer %s | FileCheck -DRESOURCE=ConsumeStructuredBuffer \
@@ -28,7 +28,7 @@
//
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump \
// RUN: -DRESOURCE=ConsumeStructuredBuffer %s | FileCheck -DRESOURCE=ConsumeStructuredBuffer \
-// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-NOSUBSCRIPT,CHECK-CONSUME %s
+// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-NOSUBSCRIPT,CHECK-CONSUME,CHECK-COUNTER-HANDLE %s
//
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump -DEMPTY \
// RUN: -DRESOURCE=RasterizerOrderedStructuredBuffer %s | FileCheck -DRESOURCE=RasterizerOrderedStructuredBuffer \
@@ -36,7 +36,7 @@
//
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump \
// RUN: -DRESOURCE=RasterizerOrderedStructuredBuffer %s | FileCheck -DRESOURCE=RasterizerOrderedStructuredBuffer \
-// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-ROV,CHECK-SUBSCRIPT,CHECK-SUBSCRIPT-UAV,CHECK-LOAD %s
+// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-ROV,CHECK-SUBSCRIPT,CHECK-SUBSCRIPT-UAV,CHECK-LOAD,CHECK-COUNTER-HANDLE %s
// This test tests two different AST generations for each structured buffer.
// The "EMPTY" test mode verifies the AST generated by forward declaration
@@ -113,6 +113,11 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
// CHECK-NEXT: DeclRefExpr {{.*}} 'const hlsl::[[RESOURCE]]<element_type>' ParmVar {{.*}} 'other' 'const hlsl::[[RESOURCE]]<element_type> &'
+// CHECK-COUNTER-HANDLE-NEXT: BinaryOperator {{.*}} '='
+// CHECK-COUNTER-HANDLE-NEXT: MemberExpr {{.*}} lvalue .__counter_handle
+// CHECK-COUNTER-HANDLE-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-COUNTER-HANDLE-NEXT: MemberExpr {{.*}} lvalue .__counter_handle
+// CHECK-COUNTER-HANDLE-NEXT: DeclRefExpr {{.*}} 'const hlsl::[[RESOURCE]]<element_type>' ParmVar {{.*}} 'other' 'const hlsl::[[RESOURCE]]<element_type> &'
// CHECK-NEXT: AlwaysInlineAttr
// operator=
@@ -125,6 +130,11 @@ RESOURCE<float> Buffer;
// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle
// CHECK-NEXT: DeclRefExpr {{.*}} 'const hlsl::[[RESOURCE]]<element_type>' ParmVar {{.*}} 'other' 'const hlsl::[[RESOURCE]]<element_type> &'
+// CHECK-COUNTER-HANDLE: BinaryOperator {{.*}} '='
+// CHECK-COUNTER-HANDLE: MemberExpr {{.*}} lvalue .__counter_handle
+// CHECK-COUNTER-HANDLE: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
+// CHECK-COUNTER-HANDLE: MemberExpr {{.*}} lvalue .__counter_handle
+// CHECK-COUNTER-HANDLE: DeclRefExpr {{.*}} 'const hlsl::[[RESOURCE]]<element_type>' ParmVar {{.*}} 'other' 'const hlsl::[[RESOURCE]]<element_type> &'
// CHECK-NEXT: ReturnStmt
// CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]<element_type>' lvalue implicit this
// CHECK-NEXT: AlwaysInlineAttr
@@ -334,3 +344,8 @@ RESOURCE<float> Buffer;
// CHECK-ROV-SAME{LITERAL}: [[hlsl::is_rov]]
// CHECK-SAME{LITERAL}: [[hlsl::raw_buffer]]
// CHECK-SAME{LITERAL}: [[hlsl::contained_type(float)]]
+// CHECK-COUNTER-HANDLE: FieldDecl {{.*}} implicit referenced __counter_handle '__hlsl_resource_t
+// CHECK-COUNTER-HANDLE-SAME{LITERAL}: [[hlsl::resource_class(UAV)]]
+// CHECK-COUNTER-HANDLE-SAME{LITERAL}: [[hlsl::raw_buffer]]
+// CHECK-COUNTER-HANDLE-SAME{LITERAL}: [[hlsl::is_counter]]
+// CHECK-COUNTER-HANDLE-SAME{LITERAL}: [[hlsl::contained_type(float)]]
diff --git a/clang/test/AST/ast-dump-concepts.cpp b/clang/test/AST/ast-dump-concepts.cpp
index 84d981d..9419dba 100644
--- a/clang/test/AST/ast-dump-concepts.cpp
+++ b/clang/test/AST/ast-dump-concepts.cpp
@@ -20,8 +20,9 @@ struct Foo {
// CHECK: TemplateTypeParmDecl {{.*}} referenced Concept {{.*}} 'binary_concept'
// CHECK-NEXT: `-ConceptSpecializationExpr {{.*}} <col:13, col:31> 'bool' Concept {{.*}} 'binary_concept'
// CHECK-NEXT: |-ImplicitConceptSpecializationDecl {{.*}} <line:13:9> col:9
- // CHECK-NEXT: | |-TemplateArgument type 'type-parameter-1-0'
- // CHECK-NEXT: | | `-TemplateTypeParmType {{.*}} 'type-parameter-1-0' dependent {{.*}}depth 1 index 0
+ // CHECK-NEXT: | |-TemplateArgument type 'R'
+ // CHECK-NEXT: | | `-TemplateTypeParmType {{.*}} 'R' dependent {{.*}}depth 1 index 0
+ // CHECK-NEXT: | | `-TemplateTypeParm {{.*}} 'R'
// CHECK-NEXT: | `-TemplateArgument type 'int'
// CHECK-NEXT: | `-BuiltinType {{.*}} 'int'
// CHECK-NEXT: |-TemplateArgument {{.*}} type 'R'
@@ -35,8 +36,9 @@ struct Foo {
// CHECK: TemplateTypeParmDecl {{.*}} referenced Concept {{.*}} 'unary_concept'
// CHECK-NEXT: `-ConceptSpecializationExpr {{.*}} <col:13> 'bool'
// CHECK-NEXT: |-ImplicitConceptSpecializationDecl {{.*}} <line:10:9> col:9
- // CHECK-NEXT: | `-TemplateArgument type 'type-parameter-1-0'
- // CHECK-NEXT: | `-TemplateTypeParmType {{.*}} 'type-parameter-1-0' dependent {{.*}}depth 1 index 0
+ // CHECK-NEXT: | `-TemplateArgument type 'R'
+ // CHECK-NEXT: | `-TemplateTypeParmType {{.*}} 'R' dependent {{.*}}depth 1 index 0
+ // CHECK-NEXT: | `-TemplateTypeParm {{.*}} 'R'
template <unary_concept R>
Foo(R);
diff --git a/clang/test/AST/ast-dump-ctad-alias.cpp b/clang/test/AST/ast-dump-ctad-alias.cpp
index 781fb9f..9a3adbc 100644
--- a/clang/test/AST/ast-dump-ctad-alias.cpp
+++ b/clang/test/AST/ast-dump-ctad-alias.cpp
@@ -185,17 +185,18 @@ void foo() {
// CHECK-NEXT: | |-BinaryOperator {{.*}} 'bool' '&&'
// CHECK-NEXT: | | |-ConceptSpecializationExpr {{.*}} 'bool' Concept {{.*}} 'invocable'
// CHECK-NEXT: | | | |-ImplicitConceptSpecializationDecl {{.*}}
-// CHECK-NEXT: | | | | |-TemplateArgument type 'type-parameter-0-2'
-// CHECK-NEXT: | | | | | `-TemplateTypeParmType {{.*}} 'type-parameter-0-2' dependent depth 0 index 2
-// CHECK-NEXT: | | | | `-TemplateArgument pack '<GH124715::Packs<type-parameter-0-1...>>'
-// CHECK-NEXT: | | | | `-TemplateArgument type 'GH124715::Packs<type-parameter-0-1...>'
-// CHECK-NEXT: | | | | `-TemplateSpecializationType {{.*}} 'GH124715::Packs<type-parameter-0-1...>' dependent
-// CHECK-NEXT: | | | | |-name: 'GH124715::Packs'
+// CHECK-NEXT: | | | | |-TemplateArgument type 'U'
+// CHECK-NEXT: | | | | | `-TemplateTypeParmType {{.*}} 'U' dependent depth 0 index 2
+// CHECK-NEXT: | | | | | `-TemplateTypeParm {{.*}} 'U'
+// CHECK-NEXT: | | | | `-TemplateArgument pack '<Packs<Ts...>>'
+// CHECK-NEXT: | | | | `-TemplateArgument type 'Packs<Ts...>'
+// CHECK-NEXT: | | | | `-TemplateSpecializationType {{.*}} 'Packs<Ts...>' dependent
+// CHECK-NEXT: | | | | |-name: 'Packs':'GH124715::Packs' qualified
// CHECK-NEXT: | | | | | `-ClassTemplateDecl {{.*}} Packs
-// CHECK-NEXT: | | | | `-TemplateArgument pack '<type-parameter-0-1...>'
-// CHECK-NEXT: | | | | `-TemplateArgument type 'type-parameter-0-1...'
-// CHECK-NEXT: | | | | `-PackExpansionType {{.*}} 'type-parameter-0-1...' dependent
-// CHECK-NEXT: | | | | `-TemplateTypeParmType {{.*}} 'type-parameter-0-1' dependent contains_unexpanded_pack depth 0 index 1 pack
+// CHECK-NEXT: | | | | `-TemplateArgument type 'Ts...'
+// CHECK-NEXT: | | | | `-PackExpansionType {{.*}} 'Ts...' dependent
+// CHECK-NEXT: | | | | `-TemplateTypeParmType {{.*}} 'Ts' dependent contains_unexpanded_pack depth 0 index 1 pack
+// CHECK-NEXT: | | | | `-TemplateTypeParm {{.*}} 'Ts'
// CHECK-NEXT: | | | |-TemplateArgument {{.*}} type 'U':'type-parameter-0-2'
// CHECK-NEXT: | | | | `-TemplateTypeParmType {{.*}} 'U' dependent depth 0 index 2
// CHECK-NEXT: | | | | `-TemplateTypeParm {{.*}} 'U'
diff --git a/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h b/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h
index 39dee17..dacb713 100644
--- a/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h
+++ b/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h
@@ -17,6 +17,20 @@ template<typename T> typename remove_reference<T>::type&& move(T&& t);
#endif
+namespace std {
+
+template <bool, typename U = void> struct enable_if {
+};
+
+template <typename T> struct enable_if<true, T> {
+ using type = T;
+};
+
+template <bool value, class T = void>
+using enable_if_t = typename enable_if<value, T>::type;
+
+}
+
@class NSString;
@class NSArray;
@class NSMutableArray;
@@ -100,6 +114,7 @@ id CFBridgingRelease(CFTypeRef X) {
__attribute__((objc_root_class))
@interface NSObject
+ (instancetype) alloc;
++ (instancetype) allocWithZone:(NSZone *)zone;
+ (Class) class;
+ (Class) superclass;
- (instancetype) init;
@@ -232,6 +247,14 @@ template <typename T> struct RemovePointer<T*> {
typedef T Type;
};
+template <typename T> struct IsPointer {
+ static constexpr bool value = false;
+};
+
+template <typename T> struct IsPointer<T*> {
+ static constexpr bool value = true;
+};
+
template <typename T> struct RetainPtr {
using ValueType = typename RemovePointer<T>::Type;
using PtrType = ValueType*;
@@ -285,12 +308,23 @@ template <typename T> struct RetainPtr {
PtrType operator->() const { return t; }
T &operator*() const { return *t; }
RetainPtr &operator=(PtrType t);
- PtrType leakRef()
+
+ template <typename U = PtrType>
+ std::enable_if_t<IsPointer<U>::value, U> leakRef() CF_RETURNS_RETAINED
+ {
+ PtrType s = t;
+ t = nullptr;
+ return s;
+ }
+
+ template <typename U = PtrType>
+ std::enable_if_t<!IsPointer<U>::value, U> leakRef() NS_RETURNS_RETAINED
{
PtrType s = t;
t = nullptr;
return s;
}
+
operator PtrType() const { return t; }
operator bool() const { return t; }
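
The mock above splits leakRef into two enable_if-constrained overloads so the CF_RETURNS_RETAINED and NS_RETURNS_RETAINED annotations each land on the overload that can apply. The dispatch idiom, reduced to a self-contained sketch using the standard trait (std::is_pointer standing in for the mock's IsPointer):

    #include <type_traits>

    template <typename U>
    std::enable_if_t<std::is_pointer<U>::value, U> pick(U v) {
      return v; // viable only when U is a raw pointer type
    }

    template <typename U>
    std::enable_if_t<!std::is_pointer<U>::value, U> pick(U v) {
      return v; // viable for all other types
    }

    int n = pick(42);  // second overload: int is not a pointer
    int *p = pick(&n); // first overload: int* is a pointer
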
diff --git a/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm b/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm
index 7699017..4570561 100644
--- a/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm
+++ b/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm
@@ -104,6 +104,14 @@ void basic_correct_arc() {
_number = value;
}
+- (id)copyWithZone:(NSZone *)zone {
+ auto copy = adoptNS([(SomeObj *)[SomeObj allocWithZone:zone] init]);
+ [copy setValue:_number];
+ [copy setNext:_next];
+ [copy setOther:_other];
+ return copy.leakRef();
+}
+
@end;
RetainPtr<CVPixelBufferRef> cf_out_argument() {
@@ -151,7 +159,7 @@ NSArray *makeArray() NS_RETURNS_RETAINED {
extern Class (*getNSArrayClass)();
NSArray *allocArrayInstance() NS_RETURNS_RETAINED {
- return [[getNSArrayClass() alloc] init];
+ return adoptNS([[getNSArrayClass() alloc] init]).leakRef();
}
extern int (*GetObj)(CF_RETURNS_RETAINED CFTypeRef* objOut);
@@ -294,7 +302,7 @@ RetainPtr<CFArrayRef> adopt_make_array() {
}
-(NSString *)make_string {
- return [[NSString alloc] initWithUTF8String:"hello"];
+ return adoptNS([[NSString alloc] initWithUTF8String:"hello"]).leakRef();
}
-(void)local_leak_string {
diff --git a/clang/test/Analysis/initializer.cpp b/clang/test/Analysis/initializer.cpp
index 713e121..88758f7 100644
--- a/clang/test/Analysis/initializer.cpp
+++ b/clang/test/Analysis/initializer.cpp
@@ -610,3 +610,51 @@ void top() {
consume(parseMatchComponent());
}
} // namespace elementwise_copy_small_array_from_post_initializer_of_cctor
+
+namespace gh147686 {
+// The problem reported in https://github.com/llvm/llvm-project/issues/147686
+// is sensitive to the initializer form: using parenthesis to initialize m_ptr
+// resulted in crashes when analyzing *m_ptr = '\0'; but using braces is fine.
+
+struct A {
+ A() : m_ptr(m_buf) { *m_ptr = '\0'; } // no-crash
+ A(int overload) : m_ptr{m_buf} { *m_ptr = '\0'; }
+ A(char src) : m_ptr(m_buf) { *m_ptr = src; } // no-crash
+ A(char src, int overload) : m_ptr{m_buf} { *m_ptr = src; }
+ char m_buf[64] = {0};
+ char * m_ptr;
+};
+
+void test1() {
+ A a;
+ clang_analyzer_eval(a.m_buf[0] == 0); // expected-warning{{TRUE}}
+ // FIXME The next eval should result in TRUE.
+ clang_analyzer_eval(*a.m_ptr == 0); // expected-warning{{UNKNOWN}}
+}
+
+void test2() {
+ A a(314);
+ clang_analyzer_eval(a.m_buf[0] == 0); // expected-warning{{TRUE}}
+ clang_analyzer_eval(*a.m_ptr == 0); // expected-warning{{TRUE}}
+}
+
+void test3() {
+ A a(0);
+ clang_analyzer_eval(a.m_buf[0] == 0); // expected-warning{{TRUE}}
+ clang_analyzer_eval(*a.m_ptr == 0); // expected-warning{{TRUE}}
+}
+
+void test3Bis(char arg) {
+ A a(arg);
+ // FIXME This test should behave like test3.
+ clang_analyzer_eval(a.m_buf[0] == arg); // expected-warning{{FALSE}} // expected-warning{{TRUE}}
+ clang_analyzer_eval(*a.m_ptr == arg); // expected-warning{{UNKNOWN}}
+}
+
+void test4(char arg) {
+ A a(arg, 314);
+ clang_analyzer_eval(a.m_buf[0] == arg); // expected-warning{{TRUE}}
+ clang_analyzer_eval(*a.m_ptr == arg); // expected-warning{{TRUE}}
+}
+
+} // namespace gh147686
diff --git a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
index 00378f7..92eae6a 100644
--- a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
+++ b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
@@ -86,7 +86,7 @@ int check_load(st1 *s1) {
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st1>>, !cir.ptr<!rec_st1>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "b"} : !cir.ptr<!rec_st1> -> !cir.ptr<!u16i>
// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b, [[MEMBER]] {is_volatile} : !cir.ptr<!u16i>) -> !u32i
-// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i
+// CIR: [[CAST:%.*]] = cir.cast integral [[BITFI]] : !u32i -> !s32i
// CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.return [[RET]] : !s32i
@@ -118,7 +118,7 @@ int check_load_exception(st3 *s3) {
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st3>>, !cir.ptr<!rec_st3>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st3> -> !cir.ptr<!u8i>
// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b1, [[MEMBER]] {is_volatile} : !cir.ptr<!u8i>) -> !u32i
-// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i
+// CIR: [[CAST:%.*]] = cir.cast integral [[BITFI]] : !u32i -> !s32i
// CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.return [[RET]] : !s32i
@@ -180,7 +180,7 @@ void check_store(st2 *s2) {
// CIR: cir.func dso_local @check_store
// CIR: [[CONST:%.*]] = cir.const #cir.int<1> : !s32i
-// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !s16i
+// CIR: [[CAST:%.*]] = cir.cast integral [[CONST]] : !s32i -> !s16i
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st2>>, !cir.ptr<!rec_st2>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_st2> -> !cir.ptr<!u32i>
// CIR: [[SETBF:%.*]] = cir.set_bitfield align(8) (#bfi_a, [[MEMBER]] : !cir.ptr<!u32i>, [[CAST]] : !s16i) {is_volatile} -> !s16i
@@ -211,7 +211,7 @@ void check_store_exception(st3 *s3) {
// CIR: cir.func dso_local @check_store_exception
// CIR: [[CONST:%.*]] = cir.const #cir.int<2> : !s32i
-// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !u32i
+// CIR: [[CAST:%.*]] = cir.cast integral [[CONST]] : !s32i -> !u32i
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st3>>, !cir.ptr<!rec_st3>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st3> -> !cir.ptr<!u8i>
// CIR: [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_b1, [[MEMBER]] : !cir.ptr<!u8i>, [[CAST]] : !u32i) {is_volatile} -> !u32i
@@ -263,7 +263,7 @@ void check_store_second_member (st4 *s4) {
// CIR: cir.func dso_local @check_store_second_member
// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i
-// CIR: [[CAST:%.*]] = cir.cast(integral, [[ONE]] : !s32i), !u64i
+// CIR: [[CAST:%.*]] = cir.cast integral [[ONE]] : !s32i -> !u64i
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st4>>, !cir.ptr<!rec_st4>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st4> -> !cir.ptr<!u16i>
// CIR: cir.set_bitfield align(8) (#bfi_b2, [[MEMBER]] : !cir.ptr<!u16i>, [[CAST]] : !u64i) {is_volatile} -> !u64i
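
This file and the CIR tests that follow show a mechanical update: the cir.cast assembly format changed from a parenthesized form to a keyword form with an explicit result-type arrow. A representative before/after pair (illustrative operands):

    %cast = cir.cast(integral, %v : !u32i), !s32i   // old syntax
    %cast = cir.cast integral %v : !u32i -> !s32i   // new syntax
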
diff --git a/clang/test/CIR/CodeGen/array-ctor.cpp b/clang/test/CIR/CodeGen/array-ctor.cpp
index bad4868..5583d9d 100644
--- a/clang/test/CIR/CodeGen/array-ctor.cpp
+++ b/clang/test/CIR/CodeGen/array-ctor.cpp
@@ -27,7 +27,7 @@ void foo() {
// CIR: cir.func dso_local @_Z3foov()
// CIR: %[[ARRAY:.*]] = cir.alloca !cir.array<!rec_S x 42>, !cir.ptr<!cir.array<!rec_S x 42>>, ["s", init]
// CIR: %[[CONST42:.*]] = cir.const #cir.int<42> : !u64i
-// CIR: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARRAY]] : !cir.ptr<!cir.array<!rec_S x 42>>), !cir.ptr<!rec_S>
+// CIR: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARRAY]] : !cir.ptr<!cir.array<!rec_S x 42>> -> !cir.ptr<!rec_S>
// CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_S>, %[[CONST42]] : !u64i), !cir.ptr<!rec_S>
// CIR: %[[ITER:.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["__array_idx"]
// CIR: cir.store %[[DECAY]], %[[ITER]] : !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>
@@ -111,7 +111,7 @@ void multi_dimensional() {
// CIR-BEFORE-LPP: cir.func{{.*}} @_Z17multi_dimensionalv()
// CIR-BEFORE-LPP: %[[S:.*]] = cir.alloca !cir.array<!cir.array<!rec_S x 5> x 3>, !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>>, ["s", init]
-// CIR-BEFORE-LPP: %[[FLAT:.*]] = cir.cast(bitcast, %[[S]] : !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>>), !cir.ptr<!cir.array<!rec_S x 15>>
+// CIR-BEFORE-LPP: %[[FLAT:.*]] = cir.cast bitcast %[[S]] : !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>> -> !cir.ptr<!cir.array<!rec_S x 15>>
// CIR-BEFORE-LPP: cir.array.ctor %[[FLAT]] : !cir.ptr<!cir.array<!rec_S x 15>> {
// CIR-BEFORE-LPP: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_S>):
// CIR-BEFORE-LPP: cir.call @_ZN1SC1Ev(%[[ARG]]) : (!cir.ptr<!rec_S>) -> ()
@@ -122,7 +122,7 @@ void multi_dimensional() {
// CIR: cir.func{{.*}} @_Z17multi_dimensionalv()
// CIR: %[[S:.*]] = cir.alloca !cir.array<!cir.array<!rec_S x 5> x 3>, !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>>, ["s", init]
// CIR: %[[CONST15:.*]] = cir.const #cir.int<15> : !u64i
-// CIR: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, {{.*}} : !cir.ptr<!cir.array<!rec_S x 15>>), !cir.ptr<!rec_S>
+// CIR: %[[DECAY:.*]] = cir.cast array_to_ptrdecay {{.*}} : !cir.ptr<!cir.array<!rec_S x 15>> -> !cir.ptr<!rec_S>
// CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_S>, %[[CONST15]] : !u64i), !cir.ptr<!rec_S>
// CIR: %[[ITER:.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["__array_idx"]
// CIR: cir.store %[[DECAY]], %[[ITER]] : !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>
diff --git a/clang/test/CIR/CodeGen/array-dtor.cpp b/clang/test/CIR/CodeGen/array-dtor.cpp
index 36db265..e969d50 100644
--- a/clang/test/CIR/CodeGen/array-dtor.cpp
+++ b/clang/test/CIR/CodeGen/array-dtor.cpp
@@ -26,7 +26,7 @@ void test_cleanup_array() {
// CIR: cir.func{{.*}} @_Z18test_cleanup_arrayv()
// CIR: %[[S:.*]] = cir.alloca !cir.array<!rec_S x 42>, !cir.ptr<!cir.array<!rec_S x 42>>, ["s"]
// CIR: %[[CONST41:.*]] = cir.const #cir.int<41> : !u64i
-// CIR: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[S]] : !cir.ptr<!cir.array<!rec_S x 42>>), !cir.ptr<!rec_S>
+// CIR: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[S]] : !cir.ptr<!cir.array<!rec_S x 42>> -> !cir.ptr<!rec_S>
// CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_S>, %[[CONST41]] : !u64i), !cir.ptr<!rec_S>
// CIR: %[[ITER:.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["__array_idx"]
// CIR: cir.store %[[END_PTR]], %[[ITER]] : !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>
@@ -109,7 +109,7 @@ void multi_dimensional() {
// CIR-BEFORE-LPP: cir.func{{.*}} @_Z17multi_dimensionalv()
// CIR-BEFORE-LPP: %[[S:.*]] = cir.alloca !cir.array<!cir.array<!rec_S x 5> x 3>, !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>>, ["s"]
-// CIR-BEFORE-LPP: %[[FLAT:.*]] = cir.cast(bitcast, %[[S]] : !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>>), !cir.ptr<!cir.array<!rec_S x 15>>
+// CIR-BEFORE-LPP: %[[FLAT:.*]] = cir.cast bitcast %[[S]] : !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>> -> !cir.ptr<!cir.array<!rec_S x 15>>
// CIR-BEFORE-LPP: cir.array.dtor %[[FLAT]] : !cir.ptr<!cir.array<!rec_S x 15>> {
// CIR-BEFORE-LPP: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_S>):
// CIR-BEFORE-LPP: cir.call @_ZN1SD1Ev(%[[ARG]]) nothrow : (!cir.ptr<!rec_S>) -> ()
@@ -119,9 +119,9 @@ void multi_dimensional() {
// CIR: cir.func{{.*}} @_Z17multi_dimensionalv()
// CIR: %[[S:.*]] = cir.alloca !cir.array<!cir.array<!rec_S x 5> x 3>, !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>>, ["s"]
-// CIR: %[[FLAT:.*]] = cir.cast(bitcast, %[[S]] : !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>>), !cir.ptr<!cir.array<!rec_S x 15>>
+// CIR: %[[FLAT:.*]] = cir.cast bitcast %[[S]] : !cir.ptr<!cir.array<!cir.array<!rec_S x 5> x 3>> -> !cir.ptr<!cir.array<!rec_S x 15>>
// CIR: %[[CONST14:.*]] = cir.const #cir.int<14> : !u64i
-// CIR: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[FLAT]] : !cir.ptr<!cir.array<!rec_S x 15>>), !cir.ptr<!rec_S>
+// CIR: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[FLAT]] : !cir.ptr<!cir.array<!rec_S x 15>> -> !cir.ptr<!rec_S>
// CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_S>, %[[CONST14]] : !u64i), !cir.ptr<!rec_S>
// CIR: %[[ITER:.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["__array_idx"]
// CIR: cir.store %[[END_PTR]], %[[ITER]] : !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>
diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp
index 5dac104..3333634 100644
--- a/clang/test/CIR/CodeGen/array.cpp
+++ b/clang/test/CIR/CodeGen/array.cpp
@@ -113,12 +113,12 @@ void func() {
// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
// CIR: %[[INIT_2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e2", init]
// CIR: %[[IDX:.*]] = cir.const #cir.int<0> : !s32i
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 10>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 10>> -> !cir.ptr<!s32i>
// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[IDX]] : !s32i), !cir.ptr<!s32i>
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR" cir.store %[[TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[IDX:.*]] = cir.const #cir.int<1> : !s32i
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 10>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 10>> -> !cir.ptr<!s32i>
// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[IDX]] : !s32i), !cir.ptr<!s32i>
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR" cir.store %[[TMP]], %[[INIT_2]] : !s32i, !cir.ptr<!s32i>
@@ -152,7 +152,7 @@ void func2() {
// CIR: %[[ARR2:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp", init]
-// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %[[ARR2]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR2]] : !cir.ptr<!cir.array<!s32i x 2>> -> !cir.ptr<!s32i>
// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s32i
// CIR: cir.store{{.*}} %[[FIVE]], %[[ARR_0]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i
@@ -209,7 +209,7 @@ void func3() {
// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
// CIR: %[[IDX:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["idx", init]
// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>> -> !cir.ptr<!s32i>
// CIR: %[[V0:.*]] = cir.const #cir.int<5> : !s32i
// CIR: cir.store{{.*}} %[[V0]], %[[ARR_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i
@@ -219,7 +219,7 @@ void func3() {
// CIR: %[[IDX_V:.*]] = cir.const #cir.int<1> : !s32i
// CIR: cir.store{{.*}} %[[IDX_V]], %[[IDX]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[TMP_IDX:.*]] = cir.load{{.*}} %[[IDX]] : !cir.ptr<!s32i>, !s32i
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>> -> !cir.ptr<!s32i>
// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!s32i>, %[[TMP_IDX]] : !s32i), !cir.ptr<!s32i>
// CIR: %[[ELE_TMP:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[ELE_TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
@@ -258,20 +258,20 @@ void func4() {
// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>, ["arr", init]
// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>), !cir.ptr<!cir.array<!s32i x 1>>
-// CIR: %[[ARR_0_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>> -> !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ARR_0_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>> -> !cir.ptr<!s32i>
// CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i
// CIR: cir.store{{.*}} %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
// CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[OFFSET]] : !s64i), !cir.ptr<!cir.array<!s32i x 1>>
-// CIR: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_1_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 1>> -> !cir.ptr<!s32i>
// CIR: %[[V_1_0:.*]] = cir.const #cir.int<6> : !s32i
// CIR: cir.store{{.*}} %[[V_1_0]], %[[ARR_1_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[IDX:.*]] = cir.const #cir.int<0> : !s32i
// CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s32i
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>), !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>> -> !cir.ptr<!cir.array<!s32i x 1>>
// CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!cir.array<!s32i x 1>>, %[[IDX_1]] : !s32i), !cir.ptr<!cir.array<!s32i x 1>>
-// CIR: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_1_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 1>> -> !cir.ptr<!s32i>
// CIR: %[[ELE_0:.*]] = cir.ptr_stride(%[[ARR_1_PTR]] : !cir.ptr<!s32i>, %[[IDX]] : !s32i), !cir.ptr<!s32i>
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ELE_0]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
@@ -306,8 +306,8 @@ void func5() {
// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 1> x 2>, !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>, ["arr", init]
// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 1>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 1>>>, ["arrayinit.temp", init]
-// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %0 : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>>), !cir.ptr<!cir.array<!s32i x 1>>
-// CIR: %[[ARR_0_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %0 : !cir.ptr<!cir.array<!cir.array<!s32i x 1> x 2>> -> !cir.ptr<!cir.array<!s32i x 1>>
+// CIR: %[[ARR_0_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_0]] : !cir.ptr<!cir.array<!s32i x 1>> -> !cir.ptr<!s32i>
// CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i
// CIR: cir.store{{.*}} %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
@@ -364,7 +364,7 @@ void func6() {
// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 2>, !cir.ptr<!cir.array<!s32i x 2>>, ["arr", init]
// CIR: %[[V:.*]] = cir.const #cir.int<4> : !s32i
// CIR: cir.store{{.*}} %[[V]], %[[VAR]] : !s32i, !cir.ptr<!s32i>
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 2>> -> !cir.ptr<!s32i>
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[VAR]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[TMP]], %[[ARR_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i
@@ -396,7 +396,7 @@ void func7() {
// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 1>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>, ["arr", init]
// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, ["arrayinit.temp", init]
-// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>>), !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 1>> -> !cir.ptr<!cir.ptr<!s32i>>
// CIR: cir.store{{.*}} %[[ARR_0]], %[[ARR_PTR]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
// CIR: %[[ARR_END:.*]] = cir.ptr_stride(%[[ARR_0]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ONE]] : !s64i), !cir.ptr<!cir.ptr<!s32i>>
@@ -497,7 +497,7 @@ void func9(int arr[10][5]) {
// CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s32i
// CIR: %[[TMP_1:.*]] = cir.load{{.*}} %[[ARR]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, !cir.ptr<!cir.array<!s32i x 5>>
// CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[TMP_1]] : !cir.ptr<!cir.array<!s32i x 5>>, %[[IDX_1]] : !s32i), !cir.ptr<!cir.array<!s32i x 5>>
-// CIR: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_1_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_1]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CIR: %[[ARR_1_2:.*]] = cir.ptr_stride(%[[ARR_1_PTR]] : !cir.ptr<!s32i>, %[[IDX]] : !s32i), !cir.ptr<!s32i>
// CIR: %[[TMP_2:.*]] = cir.load{{.*}} %[[ARR_1_2]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store{{.*}} %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
@@ -581,7 +581,7 @@ void array_with_complex_elements() {
}
// CIR: %[[ARR_ADDR:.*]] = cir.alloca !cir.array<!cir.complex<!cir.float> x 2>, !cir.ptr<!cir.array<!cir.complex<!cir.float> x 2>>, ["arr", init]
-// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_ADDR]] : !cir.ptr<!cir.array<!cir.complex<!cir.float> x 2>>), !cir.ptr<!cir.complex<!cir.float>>
+// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR_ADDR]] : !cir.ptr<!cir.array<!cir.complex<!cir.float> x 2>> -> !cir.ptr<!cir.complex<!cir.float>>
// CIR: %[[CONST_COMPLEX_0:.*]] = cir.const #cir.const_complex<#cir.fp<1.100000e+00> : !cir.float, #cir.fp<2.200000e+00> : !cir.float> : !cir.complex<!cir.float>
// CIR: cir.store{{.*}} %[[CONST_COMPLEX_0]], %[[ARR_0]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
// CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s64i
diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp
index 3e509f5..1089d4b 100644
--- a/clang/test/CIR/CodeGen/assign-operator.cpp
+++ b/clang/test/CIR/CodeGen/assign-operator.cpp
@@ -17,7 +17,7 @@ void a() {
// CIR: cir.func{{.*}} @_Z1av()
// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_x, !cir.ptr<!rec_x>, ["a"]
// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-// CIR: %[[ONE_CAST:.*]] = cir.cast(integral, %[[ONE]] : !u32i), !s32i
+// CIR: %[[ONE_CAST:.*]] = cir.cast integral %[[ONE]] : !u32i -> !s32i
// CIR: %[[RET:.*]] = cir.call @_ZN1xaSEi(%[[A_ADDR]], %[[ONE_CAST]]) : (!cir.ptr<!rec_x>, !s32i) -> !s32i
// LLVM: define{{.*}} @_Z1av()
@@ -75,10 +75,10 @@ void copy_c(C &c1, C &c2) {
// CIR: %[[A_MEMBER_2:.*]] = cir.get_member %[[ARG1_LOAD]][0] {name = "a"}
// CIR: %[[C_A:.*]] = cir.call @_ZN1AaSERKS_(%[[A_MEMBER]], %[[A_MEMBER_2]])
// CIR: %[[B_MEMBER:.*]] = cir.get_member %[[THIS]][1] {name = "b"}
-// CIR: %[[B_VOID_PTR:.*]] = cir.cast(bitcast, %[[B_MEMBER]] : !cir.ptr<!cir.array<!rec_B x 16>>), !cir.ptr<!void>
+// CIR: %[[B_VOID_PTR:.*]] = cir.cast bitcast %[[B_MEMBER]] : !cir.ptr<!cir.array<!rec_B x 16>> -> !cir.ptr<!void>
// CIR: %[[RET_LOAD:.*]] = cir.load %[[ARG1_ADDR]]
// CIR: %[[B_MEMBER_2:.*]] = cir.get_member %[[RET_LOAD]][1] {name = "b"}
-// CIR: %[[B_VOID_PTR_2:.*]] = cir.cast(bitcast, %[[B_MEMBER_2]] : !cir.ptr<!cir.array<!rec_B x 16>>), !cir.ptr<!void>
+// CIR: %[[B_VOID_PTR_2:.*]] = cir.cast bitcast %[[B_MEMBER_2]] : !cir.ptr<!cir.array<!rec_B x 16>> -> !cir.ptr<!void>
// CIR: %[[SIZE:.*]] = cir.const #cir.int<64> : !u64i
// CIR: %[[COUNT:.*]] = cir.call @memcpy(%[[B_VOID_PTR]], %[[B_VOID_PTR_2]], %[[SIZE]])
// CIR: cir.store %[[THIS]], %[[RET_ADDR]]
diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c
index 2c3c5b0..9268615 100644
--- a/clang/test/CIR/CodeGen/basic.c
+++ b/clang/test/CIR/CodeGen/basic.c
@@ -296,7 +296,7 @@ size_type max_size(void) {
// CIR: %0 = cir.alloca !u64i, !cir.ptr<!u64i>, ["__retval"] {alignment = 8 : i64}
// CIR: %1 = cir.const #cir.int<0> : !s32i
// CIR: %2 = cir.unary(not, %1) : !s32i, !s32i
-// CIR: %3 = cir.cast(integral, %2 : !s32i), !u64i
+// CIR: %3 = cir.cast integral %2 : !s32i -> !u64i
// CIR: %4 = cir.const #cir.int<8> : !u64i
// CIR: %5 = cir.binop(div, %3, %4) : !u64i
diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp
index fe6dd93..af8de6f 100644
--- a/clang/test/CIR/CodeGen/basic.cpp
+++ b/clang/test/CIR/CodeGen/basic.cpp
@@ -124,7 +124,7 @@ size_type max_size() {
// CHECK: %0 = cir.alloca !u64i, !cir.ptr<!u64i>, ["__retval"] {alignment = 8 : i64}
// CHECK: %1 = cir.const #cir.int<0> : !s32i
// CHECK: %2 = cir.unary(not, %1) : !s32i, !s32i
-// CHECK: %3 = cir.cast(integral, %2 : !s32i), !u64i
+// CHECK: %3 = cir.cast integral %2 : !s32i -> !u64i
// CHECK: %4 = cir.const #cir.int<8> : !u64i
// CHECK: %5 = cir.binop(div, %3, %4) : !u64i
// CHECK: cir.store{{.*}} %5, %0 : !u64i, !cir.ptr<!u64i>
diff --git a/clang/test/CIR/CodeGen/binassign.c b/clang/test/CIR/CodeGen/binassign.c
index 541b50a..65bea4d 100644
--- a/clang/test/CIR/CodeGen/binassign.c
+++ b/clang/test/CIR/CodeGen/binassign.c
@@ -25,7 +25,7 @@ void binary_assign(void) {
// CIR: %[[TRUE:.*]] = cir.const #true
// CIR: cir.store{{.*}} %[[TRUE]], %[[B]] : !cir.bool, !cir.ptr<!cir.bool>
// CIR: %[[CHAR_INI_INIT:.*]] = cir.const #cir.int<65> : !s32i
-// CIR: %[[CHAR_VAL:.*]] = cir.cast(integral, %[[CHAR_INI_INIT]] : !s32i), !s8i
+// CIR: %[[CHAR_VAL:.*]] = cir.cast integral %[[CHAR_INI_INIT]] : !s32i -> !s8i
// CIR: cir.store{{.*}} %[[CHAR_VAL]], %[[C]] : !s8i, !cir.ptr<!s8i>
// CIR: %[[FLOAT_VAL:.*]] = cir.const #cir.fp<3.140000e+00> : !cir.float
// CIR: cir.store{{.*}} %[[FLOAT_VAL]], %[[F]] : !cir.float, !cir.ptr<!cir.float>
diff --git a/clang/test/CIR/CodeGen/binop.c b/clang/test/CIR/CodeGen/binop.c
index 280fd29..4427e4b 100644
--- a/clang/test/CIR/CodeGen/binop.c
+++ b/clang/test/CIR/CodeGen/binop.c
@@ -5,9 +5,9 @@ void conditionalResultIimplicitCast(int a, int b, float f) {
// Should implicit cast back to int.
int x = a && b;
// CHECK: %[[#INT:]] = cir.ternary
- // CHECK: %{{.+}} = cir.cast(bool_to_int, %[[#INT]] : !cir.bool), !s32i
+ // CHECK: %{{.+}} = cir.cast bool_to_int %[[#INT]] : !cir.bool -> !s32i
float y = f && f;
// CHECK: %[[#BOOL:]] = cir.ternary
- // CHECK: %[[#INT:]] = cir.cast(bool_to_int, %[[#BOOL]] : !cir.bool), !s32i
- // CHECK: %{{.+}} = cir.cast(int_to_float, %[[#INT]] : !s32i), !cir.float
+ // CHECK: %[[#INT:]] = cir.cast bool_to_int %[[#BOOL]] : !cir.bool -> !s32i
+ // CHECK: %{{.+}} = cir.cast int_to_float %[[#INT]] : !s32i -> !cir.float
}
diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp
index 847e817..c1a432d 100644
--- a/clang/test/CIR/CodeGen/binop.cpp
+++ b/clang/test/CIR/CodeGen/binop.cpp
@@ -337,13 +337,13 @@ void zext_shift_example(int a, unsigned char b) {
// CIR: %[[A1:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR: %[[B1:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr<!u8i>, !u8i
-// CIR: %[[B1_EXT:.*]] = cir.cast(integral, %[[B1]] : !u8i), !s32i
+// CIR: %[[B1_EXT:.*]] = cir.cast integral %[[B1]] : !u8i -> !s32i
// CIR: %[[ASHR:.*]] = cir.shift(right, %[[A1]] : !s32i, %[[B1_EXT]] : !s32i) -> !s32i
// CIR: cir.store{{.*}} %[[ASHR]], %[[X_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[A2:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR: %[[B2:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr<!u8i>, !u8i
-// CIR: %[[B2_EXT:.*]] = cir.cast(integral, %[[B2]] : !u8i), !s32i
+// CIR: %[[B2_EXT:.*]] = cir.cast integral %[[B2]] : !u8i -> !s32i
// CIR: %[[SHL:.*]] = cir.shift(left, %[[A2]] : !s32i, %[[B2_EXT]] : !s32i) -> !s32i
// CIR: cir.store{{.*}} %[[SHL]], %[[X_PTR]] : !s32i, !cir.ptr<!s32i>
@@ -409,13 +409,13 @@ void sext_shift_example(int a, signed char b) {
// CIR: %[[A1:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR: %[[B1:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr<!s8i>, !s8i
-// CIR: %[[B1_EXT:.*]] = cir.cast(integral, %[[B1]] : !s8i), !s32i
+// CIR: %[[B1_EXT:.*]] = cir.cast integral %[[B1]] : !s8i -> !s32i
// CIR: %[[ASHR:.*]] = cir.shift(right, %[[A1]] : !s32i, %[[B1_EXT]] : !s32i) -> !s32i
// CIR: cir.store{{.*}} %[[ASHR]], %[[X_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[A2:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr<!s32i>, !s32i
// CIR: %[[B2:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr<!s8i>, !s8i
-// CIR: %[[B2_EXT:.*]] = cir.cast(integral, %[[B2]] : !s8i), !s32i
+// CIR: %[[B2_EXT:.*]] = cir.cast integral %[[B2]] : !s8i -> !s32i
// CIR: %[[SHL:.*]] = cir.shift(left, %[[A2]] : !s32i, %[[B2_EXT]] : !s32i) -> !s32i
// CIR: cir.store{{.*}} %[[SHL]], %[[X_PTR]] : !s32i, !cir.ptr<!s32i>
@@ -481,13 +481,13 @@ void long_shift_example(long long a, short b) {
// CIR: %[[A1:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr<!s64i>, !s64i
// CIR: %[[B1:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr<!s16i>, !s16i
-// CIR: %[[B1_EXT:.*]] = cir.cast(integral, %[[B1]] : !s16i), !s32i
+// CIR: %[[B1_EXT:.*]] = cir.cast integral %[[B1]] : !s16i -> !s32i
// CIR: %[[ASHR:.*]] = cir.shift(right, %[[A1]] : !s64i, %[[B1_EXT]] : !s32i) -> !s64i
// CIR: cir.store{{.*}} %[[ASHR]], %[[X_PTR]] : !s64i, !cir.ptr<!s64i>
// CIR: %[[A2:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr<!s64i>, !s64i
// CIR: %[[B2:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr<!s16i>, !s16i
-// CIR: %[[B2_EXT:.*]] = cir.cast(integral, %[[B2]] : !s16i), !s32i
+// CIR: %[[B2_EXT:.*]] = cir.cast integral %[[B2]] : !s16i -> !s32i
// CIR: %[[SHL:.*]] = cir.shift(left, %[[A2]] : !s64i, %[[B2_EXT]] : !s32i) -> !s64i
// CIR: cir.store{{.*}} %[[SHL]], %[[X_PTR]] : !s64i, !cir.ptr<!s64i>
diff --git a/clang/test/CIR/CodeGen/builtin_bit.cpp b/clang/test/CIR/CodeGen/builtin_bit.cpp
index 8b9a187..32a53d8 100644
--- a/clang/test/CIR/CodeGen/builtin_bit.cpp
+++ b/clang/test/CIR/CodeGen/builtin_bit.cpp
@@ -34,7 +34,7 @@ int test_builtin_clrsbl(long x) {
// CIR-LABEL: _Z19test_builtin_clrsbll
// CIR: [[TMP:%.+]] = cir.clrsb %{{.+}} : !s64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !s64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !s64i -> !s32i
// LLVM-LABEL: _Z19test_builtin_clrsbll
// LLVM: %[[X:.+]] = load i64, ptr %{{.+}}, align 8
@@ -58,7 +58,7 @@ int test_builtin_clrsbll(long long x) {
// CIR-LABEL: _Z20test_builtin_clrsbllx
// CIR: [[TMP:%.+]] = cir.clrsb %{{.+}} : !s64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !s64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !s64i -> !s32i
// LLVM-LABEL: _Z20test_builtin_clrsbllx
// LLVM: %[[X:.+]] = load i64, ptr %{{.+}}, align 8
@@ -82,7 +82,7 @@ int test_builtin_ctzs(unsigned short x) {
// CIR-LABEL: _Z17test_builtin_ctzst
// CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u16i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u16i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u16i -> !s32i
// LLVM-LABEL: _Z17test_builtin_ctzst
// LLVM: %{{.+}} = call i16 @llvm.cttz.i16(i16 %{{.+}}, i1 true)
@@ -96,7 +96,7 @@ int test_builtin_ctz(unsigned x) {
// CIR-LABEL: _Z16test_builtin_ctzj
// CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u32i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i
// LLVM-LABEL: _Z16test_builtin_ctzj
// LLVM: %{{.+}} = call i32 @llvm.cttz.i32(i32 %{{.+}}, i1 true)
@@ -110,7 +110,7 @@ int test_builtin_ctzl(unsigned long x) {
// CIR-LABEL: _Z17test_builtin_ctzlm
// CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i
// LLVM-LABEL: _Z17test_builtin_ctzlm
// LLVM: %{{.+}} = call i64 @llvm.cttz.i64(i64 %{{.+}}, i1 true)
@@ -124,7 +124,7 @@ int test_builtin_ctzll(unsigned long long x) {
// CIR-LABEL: _Z18test_builtin_ctzlly
// CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i
// LLVM-LABEL: _Z18test_builtin_ctzlly
// LLVM: %{{.+}} = call i64 @llvm.cttz.i64(i64 %{{.+}}, i1 true)
@@ -138,7 +138,7 @@ int test_builtin_ctzg(unsigned x) {
// CIR-LABEL: _Z17test_builtin_ctzgj
// CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u32i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i
// LLVM-LABEL: _Z17test_builtin_ctzgj
// LLVM: %{{.+}} = call i32 @llvm.cttz.i32(i32 %{{.+}}, i1 true)
@@ -152,7 +152,7 @@ int test_builtin_clzs(unsigned short x) {
// CIR-LABEL: _Z17test_builtin_clzst
// CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u16i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u16i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u16i -> !s32i
// LLVM-LABEL: _Z17test_builtin_clzst
// LLVM: %{{.+}} = call i16 @llvm.ctlz.i16(i16 %{{.+}}, i1 true)
@@ -166,7 +166,7 @@ int test_builtin_clz(unsigned x) {
// CIR-LABEL: _Z16test_builtin_clzj
// CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u32i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i
// LLVM-LABEL: _Z16test_builtin_clzj
// LLVM: %{{.+}} = call i32 @llvm.ctlz.i32(i32 %{{.+}}, i1 true)
@@ -180,7 +180,7 @@ int test_builtin_clzl(unsigned long x) {
// CIR-LABEL: _Z17test_builtin_clzlm
// CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i
// LLVM-LABEL: _Z17test_builtin_clzlm
// LLVM: %{{.+}} = call i64 @llvm.ctlz.i64(i64 %{{.+}}, i1 true)
@@ -194,7 +194,7 @@ int test_builtin_clzll(unsigned long long x) {
// CIR-LABEL: _Z18test_builtin_clzlly
// CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i
// LLVM-LABEL: _Z18test_builtin_clzlly
// LLVM: %{{.+}} = call i64 @llvm.ctlz.i64(i64 %{{.+}}, i1 true)
@@ -208,7 +208,7 @@ int test_builtin_clzg(unsigned x) {
// CIR-LABEL: _Z17test_builtin_clzgj
// CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u32i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i
// LLVM-LABEL: _Z17test_builtin_clzgj
// LLVM: %{{.+}} = call i32 @llvm.ctlz.i32(i32 %{{.+}}, i1 true)
@@ -294,7 +294,7 @@ int test_builtin_parity(unsigned x) {
// CIR-LABEL: _Z19test_builtin_parityj
// CIR: [[TMP:%.+]] = cir.parity %{{.+}} : !u32i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i
// LLVM-LABEL: _Z19test_builtin_parityj
// LLVM: %[[X:.+]] = load i32, ptr %{{.+}}, align 4
@@ -312,7 +312,7 @@ int test_builtin_parityl(unsigned long x) {
// CIR-LABEL: _Z20test_builtin_paritylm
// CIR: [[TMP:%.+]] = cir.parity %{{.+}} : !u64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i
// LLVM-LABEL: _Z20test_builtin_paritylm
// LLVM: %[[X:.+]] = load i64, ptr %{{.+}}, align 8
@@ -330,7 +330,7 @@ int test_builtin_parityll(unsigned long long x) {
// CIR-LABEL: _Z21test_builtin_paritylly
// CIR: [[TMP:%.+]] = cir.parity %{{.+}} : !u64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i
// LLVM-LABEL: _Z21test_builtin_paritylly
// LLVM: %[[X:.+]] = load i64, ptr %{{.+}}, align 8
@@ -348,7 +348,7 @@ int test_builtin_popcount(unsigned x) {
// CIR-LABEL: _Z21test_builtin_popcountj
// CIR: [[TMP:%.+]] = cir.popcount %{{.+}} : !u32i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i
// LLVM-LABEL: _Z21test_builtin_popcountj
// LLVM: %{{.+}} = call i32 @llvm.ctpop.i32(i32 %{{.+}})
@@ -362,7 +362,7 @@ int test_builtin_popcountl(unsigned long x) {
// CIR-LABEL: _Z22test_builtin_popcountlm
// CIR: [[TMP:%.+]] = cir.popcount %{{.+}} : !u64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i
// LLVM-LABEL: _Z22test_builtin_popcountlm
// LLVM: %{{.+}} = call i64 @llvm.ctpop.i64(i64 %{{.+}})
@@ -376,7 +376,7 @@ int test_builtin_popcountll(unsigned long long x) {
// CIR-LABEL: _Z23test_builtin_popcountlly
// CIR: [[TMP:%.+]] = cir.popcount %{{.+}} : !u64i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i
// LLVM-LABEL: _Z23test_builtin_popcountlly
// LLVM: %{{.+}} = call i64 @llvm.ctpop.i64(i64 %{{.+}})
@@ -390,7 +390,7 @@ int test_builtin_popcountg(unsigned x) {
// CIR-LABEL: _Z22test_builtin_popcountgj
// CIR: [[TMP:%.+]] = cir.popcount %{{.+}} : !u32i
-// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i
+// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i
// LLVM-LABEL: _Z22test_builtin_popcountgj
// LLVM: %{{.+}} = call i32 @llvm.ctpop.i32(i32 %{{.+}})
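The builtin_bit.cpp hunks above are a mechanical migration of FileCheck expectations from the old parenthesized `cir.cast` assembly format to the new keyword form; only the printed syntax changes, not the operation. A minimal sketch of the two spellings (the `%v`/`%r` names are illustrative, not taken from the tests):

  // Old format: kind and operand inside parentheses, result type after a comma
  //   %r = cir.cast(integral, %v : !u64i), !s32i
  // New format: bare kind keyword, then operand, then an arrow to the result type
  //   %r = cir.cast integral %v : !u64i -> !s32i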
diff --git a/clang/test/CIR/CodeGen/builtin_call.cpp b/clang/test/CIR/CodeGen/builtin_call.cpp
index 853d894..a30df97 100644
--- a/clang/test/CIR/CodeGen/builtin_call.cpp
+++ b/clang/test/CIR/CodeGen/builtin_call.cpp
@@ -165,9 +165,9 @@ void expect(int x, int y) {
// CIR-LABEL: cir.func{{.*}} @_Z6expectii
// CIR: %[[X:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR-NEXT: %[[X_LONG:.+]] = cir.cast(integral, %[[X]] : !s32i), !s64i
+// CIR-NEXT: %[[X_LONG:.+]] = cir.cast integral %[[X]] : !s32i -> !s64i
// CIR-NEXT: %[[Y:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR-NEXT: %[[Y_LONG:.+]] = cir.cast(integral, %[[Y]] : !s32i), !s64i
+// CIR-NEXT: %[[Y_LONG:.+]] = cir.cast integral %[[Y]] : !s32i -> !s64i
// CIR-NEXT: %{{.+}} = cir.expect(%[[X_LONG]], %[[Y_LONG]]) : !s64i
// CIR: }
@@ -185,9 +185,9 @@ void expect_prob(int x, int y) {
// CIR-LABEL: cir.func{{.*}} @_Z11expect_probii
// CIR: %[[X:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR-NEXT: %[[X_LONG:.+]] = cir.cast(integral, %[[X]] : !s32i), !s64i
+// CIR-NEXT: %[[X_LONG:.+]] = cir.cast integral %[[X]] : !s32i -> !s64i
// CIR-NEXT: %[[Y:.+]] = cir.load align(4) %{{.+}} : !cir.ptr<!s32i>, !s32i
-// CIR-NEXT: %[[Y_LONG:.+]] = cir.cast(integral, %[[Y]] : !s32i), !s64i
+// CIR-NEXT: %[[Y_LONG:.+]] = cir.cast integral %[[Y]] : !s32i -> !s64i
// CIR-NEXT: %{{.+}} = cir.expect(%[[X_LONG]], %[[Y_LONG]], 2.500000e-01) : !s64i
// CIR: }
diff --git a/clang/test/CIR/CodeGen/builtin_printf.cpp b/clang/test/CIR/CodeGen/builtin_printf.cpp
index 80875c3..898984a 100644
--- a/clang/test/CIR/CodeGen/builtin_printf.cpp
+++ b/clang/test/CIR/CodeGen/builtin_printf.cpp
@@ -28,11 +28,11 @@ void func(char const * const str, int i) {
// CIR: %[[null_ptr:.+]] = cir.const #cir.ptr<null> : !cir.ptr<!s8i>
// CIR: %[[printf_result1:.+]] = cir.call @printf(%[[null_ptr]]) nothrow : (!cir.ptr<!s8i>) -> !s32i
// CIR: %[[str_fmt_global:.+]] = cir.get_global @".str" : !cir.ptr<!cir.array<!s8i x 3>>
-// CIR: %[[str_fmt_ptr:.+]] = cir.cast(array_to_ptrdecay, %[[str_fmt_global]] : !cir.ptr<!cir.array<!s8i x 3>>), !cir.ptr<!s8i>
+// CIR: %[[str_fmt_ptr:.+]] = cir.cast array_to_ptrdecay %[[str_fmt_global]] : !cir.ptr<!cir.array<!s8i x 3>> -> !cir.ptr<!s8i>
// CIR: %[[str_val:.+]] = cir.load{{.*}} %[[str_ptr]] : !cir.ptr<!cir.ptr<!s8i>>, !cir.ptr<!s8i>
// CIR: %[[printf_result2:.+]] = cir.call @printf(%[[str_fmt_ptr]], %[[str_val]]) nothrow : (!cir.ptr<!s8i>, !cir.ptr<!s8i>) -> !s32i
// CIR: %[[full_fmt_global:.+]] = cir.get_global @".str.1" : !cir.ptr<!cir.array<!s8i x 7>>
-// CIR: %[[full_fmt_ptr:.+]] = cir.cast(array_to_ptrdecay, %[[full_fmt_global]] : !cir.ptr<!cir.array<!s8i x 7>>), !cir.ptr<!s8i>
+// CIR: %[[full_fmt_ptr:.+]] = cir.cast array_to_ptrdecay %[[full_fmt_global]] : !cir.ptr<!cir.array<!s8i x 7>> -> !cir.ptr<!s8i>
// CIR: %[[str_val2:.+]] = cir.load{{.*}} %[[str_ptr]] : !cir.ptr<!cir.ptr<!s8i>>, !cir.ptr<!s8i>
// CIR: %[[i_val:.+]] = cir.load{{.*}} %[[i_ptr]] : !cir.ptr<!s32i>, !s32i
// CIR: %[[printf_result3:.+]] = cir.call @printf(%[[full_fmt_ptr]], %[[str_val2]], %[[i_val]]) nothrow : (!cir.ptr<!s8i>, !cir.ptr<!s8i>, !s32i) -> !s32i
diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp
index caf6de7..7afa955 100644
--- a/clang/test/CIR/CodeGen/cast.cpp
+++ b/clang/test/CIR/CodeGen/cast.cpp
@@ -12,7 +12,7 @@ unsigned char cxxstaticcast_0(unsigned int x) {
// CIR: %[[RV:[0-9]+]] = cir.alloca !u8i, !cir.ptr<!u8i>, ["__retval"] {alignment = 1 : i64}
// CIR: cir.store %arg0, %[[XPTR]] : !u32i, !cir.ptr<!u32i>
// CIR: %[[XVAL:[0-9]+]] = cir.load{{.*}} %[[XPTR]] : !cir.ptr<!u32i>, !u32i
-// CIR: %[[CASTED:[0-9]+]] = cir.cast(integral, %[[XVAL]] : !u32i), !u8i
+// CIR: %[[CASTED:[0-9]+]] = cir.cast integral %[[XVAL]] : !u32i -> !u8i
// CIR: cir.store %[[CASTED]], %[[RV]] : !u8i, !cir.ptr<!u8i>
// CIR: %[[R:[0-9]+]] = cir.load{{.*}} %1 : !cir.ptr<!u8i>, !u8i
// CIR: cir.return %[[R]] : !u8i
@@ -30,55 +30,55 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) {
// LLVM: define{{.*}} i32 @_Z13cStyleCasts_0jifsd
char a = (char)x1; // truncate
- // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s8i
+ // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !u32i -> !s8i
// LLVM: %{{[0-9]+}} = trunc i32 %{{[0-9]+}} to i8
short b = (short)x2; // truncate with sign
- // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !s16i
+ // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !s32i -> !s16i
// LLVM: %{{[0-9]+}} = trunc i32 %{{[0-9]+}} to i16
long long c = (long long)x1; // zero extend
- // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s64i
+ // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !u32i -> !s64i
// LLVM: %{{[0-9]+}} = zext i32 %{{[0-9]+}} to i64
long long d = (long long)x2; // sign extend
- // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !s64i
+ // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !s32i -> !s64i
// LLVM: %{{[0-9]+}} = sext i32 %{{[0-9]+}} to i64
unsigned ui = (unsigned)x2; // sign drop
- // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !u32i
+ // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !s32i -> !u32i
int si = (int)x1; // sign add
- // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s32i
+ // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !u32i -> !s32i
bool ib;
int bi = (int)ib; // bool to int
- // CIR: %{{[0-9]+}} = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !s32i
+ // CIR: %{{[0-9]+}} = cir.cast bool_to_int %{{[0-9]+}} : !cir.bool -> !s32i
// LLVM: %{{[0-9]+}} = zext i1 %{{[0-9]+}} to i32
bool b2 = x2; // int to bool
- // CIR: %{{[0-9]+}} = cir.cast(int_to_bool, %{{[0-9]+}} : !s32i), !cir.bool
+ // CIR: %{{[0-9]+}} = cir.cast int_to_bool %{{[0-9]+}} : !s32i -> !cir.bool
// LLVM: %[[INTTOBOOL:[0-9]+]] = icmp ne i32 %{{[0-9]+}}, 0
// LLVM: zext i1 %[[INTTOBOOL]] to i8
void *p;
bool b3 = p; // ptr to bool
- // CIR: %{{[0-9]+}} = cir.cast(ptr_to_bool, %{{[0-9]+}} : !cir.ptr<!void>), !cir.bool
+ // CIR: %{{[0-9]+}} = cir.cast ptr_to_bool %{{[0-9]+}} : !cir.ptr<!void> -> !cir.bool
// LLVM: %[[PTRTOBOOL:[0-9]+]] = icmp ne ptr %{{[0-9]+}}, null
// LLVM: zext i1 %[[PTRTOBOOL]] to i8
float f;
bool b4 = f; // float to bool
- // CIR: %{{[0-9]+}} = cir.cast(float_to_bool, %{{[0-9]+}} : !cir.float), !cir.bool
+ // CIR: %{{[0-9]+}} = cir.cast float_to_bool %{{[0-9]+}} : !cir.float -> !cir.bool
// LLVM: %{{[0-9]+}} = fcmp une float %{{[0-9]+}}, 0.000000e+00
// LLVM: %{{[0-9]+}} = zext i1 %{{[0-9]+}} to i8
double d2 = f; // float to double
- // CIR: %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : !cir.float), !cir.double
+ // CIR: %{{[0-9]+}} = cir.cast floating %{{[0-9]+}} : !cir.float -> !cir.double
// LLVM: %{{[0-9]+}} = fpext float %{{[0-9]+}} to double
f = d2; // double to float
- // CIR: %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : !cir.double), !cir.float
+ // CIR: %{{[0-9]+}} = cir.cast floating %{{[0-9]+}} : !cir.double -> !cir.float
// LLVM: %{{[0-9]+}} = fptrunc double %{{[0-9]+}} to float
return 0;
@@ -93,7 +93,7 @@ bool cptr(void *d) {
// CIR: %[[DPTR:[0-9]+]] = cir.alloca !cir.ptr<!void>, !cir.ptr<!cir.ptr<!void>>, ["d", init] {alignment = 8 : i64}
// CIR: %[[DVAL:[0-9]+]] = cir.load{{.*}} %[[DPTR]] : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
-// CIR: %{{[0-9]+}} = cir.cast(ptr_to_bool, %[[DVAL]] : !cir.ptr<!void>), !cir.bool
+// CIR: %{{[0-9]+}} = cir.cast ptr_to_bool %[[DVAL]] : !cir.ptr<!void> -> !cir.bool
// LLVM-LABEL: define{{.*}} i1 @_Z4cptrPv(ptr %0)
// LLVM: %[[ARG_STORAGE:.*]] = alloca ptr, i64 1
@@ -127,7 +127,7 @@ void bitcast() {
}
// CIR: %[[D_VEC:.*]] = cir.load{{.*}} {{.*}} : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double>
-// CIR: %[[I_VEC:.*]] = cir.cast(bitcast, %[[D_VEC]] : !cir.vector<2 x !cir.double>), !cir.vector<4 x !s32i>
+// CIR: %[[I_VEC:.*]] = cir.cast bitcast %[[D_VEC]] : !cir.vector<2 x !cir.double> -> !cir.vector<4 x !s32i>
// LLVM: %[[D_VEC:.*]] = load <2 x double>, ptr {{.*}}, align 16
// LLVM: %[[I_VEC:.*]] = bitcast <2 x double> %[[D_VEC]] to <4 x i32>
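The cast.cpp hunks exercise the new spelling across every scalar cast kind. As a compact C++ illustration of the source-to-CIR mapping these tests check (hypothetical function, not part of the test file):

  int cast_sketch(unsigned x, float f) {
    char c = (char)x;  // CIR: cir.cast integral %{{.*}} : !u32i -> !s8i (truncate)
    bool b = f;        // CIR: cir.cast float_to_bool %{{.*}} : !cir.float -> !cir.bool
    double d = f;      // CIR: cir.cast floating %{{.*}} : !cir.float -> !cir.double (fpext)
    return (int)b;     // CIR: cir.cast bool_to_int %{{.*}} : !cir.bool -> !s32i
  }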
diff --git a/clang/test/CIR/CodeGen/cmp.cpp b/clang/test/CIR/CodeGen/cmp.cpp
index 75c8cda..7e32d16 100644
--- a/clang/test/CIR/CodeGen/cmp.cpp
+++ b/clang/test/CIR/CodeGen/cmp.cpp
@@ -407,9 +407,9 @@ void bool_cmp(bool a, bool b) {
// CIR: %[[X_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["x", init]
// CIR: %[[A1:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr<!cir.bool>, !cir.bool
-// CIR: %[[A1_INT:.*]] = cir.cast(bool_to_int, %[[A1]] : !cir.bool), !s32i
+// CIR: %[[A1_INT:.*]] = cir.cast bool_to_int %[[A1]] : !cir.bool -> !s32i
// CIR: %[[B1:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr<!cir.bool>, !cir.bool
-// CIR: %[[B1_INT:.*]] = cir.cast(bool_to_int, %[[B1]] : !cir.bool), !s32i
+// CIR: %[[B1_INT:.*]] = cir.cast bool_to_int %[[B1]] : !cir.bool -> !s32i
// CIR: %{{.*}} = cir.cmp(gt, %[[A1_INT]], %[[B1_INT]]) : !s32i, !cir.bool
// CIR: cir.store{{.*}} {{.*}}, %[[X_PTR]] : !cir.bool, !cir.ptr<!cir.bool>
diff --git a/clang/test/CIR/CodeGen/comma.c b/clang/test/CIR/CodeGen/comma.c
index a1479b8..cc26a3f 100644
--- a/clang/test/CIR/CodeGen/comma.c
+++ b/clang/test/CIR/CodeGen/comma.c
@@ -24,7 +24,7 @@ void comma(void) {
// CIR: %[[TRUE:.*]] = cir.const #true
// CIR: cir.store{{.*}} %[[TRUE]], %[[B]] : !cir.bool, !cir.ptr<!cir.bool>
// CIR: %[[CHAR_INI_INIT:.*]] = cir.const #cir.int<65> : !s32i
-// CIR: %[[CHAR_VAL:.*]] = cir.cast(integral, %[[CHAR_INI_INIT]] : !s32i), !s8i
+// CIR: %[[CHAR_VAL:.*]] = cir.cast integral %[[CHAR_INI_INIT]] : !s32i -> !s8i
// CIR: cir.store{{.*}} %[[CHAR_VAL]], %[[C]] : !s8i, !cir.ptr<!s8i>
// CIR: %[[FLOAT_VAL:.*]] = cir.const #cir.fp<3.140000e+00> : !cir.float
// CIR: cir.store{{.*}} %[[FLOAT_VAL]], %[[F]] : !cir.float, !cir.ptr<!cir.float>
diff --git a/clang/test/CIR/CodeGen/complex-cast.cpp b/clang/test/CIR/CodeGen/complex-cast.cpp
index a8f51cd..5dc08eb 100644
--- a/clang/test/CIR/CodeGen/complex-cast.cpp
+++ b/clang/test/CIR/CodeGen/complex-cast.cpp
@@ -20,7 +20,7 @@ void scalar_to_complex() {
ci = sd;
}
-// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %{{.*}} : !cir.double), !cir.complex<!cir.double>
+// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast float_to_complex %{{.*}} : !cir.double -> !cir.complex<!cir.double>
// CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.double>, !cir.double
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double
@@ -35,7 +35,7 @@ void scalar_to_complex() {
// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
// OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr @cd, i32 0, i32 1), align 8
-// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %{{.*}} : !s32i), !cir.complex<!s32i>
+// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast int_to_complex %{{.*}} : !s32i -> !cir.complex<!s32i>
// CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!s32i>, !s32i
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i
@@ -50,11 +50,11 @@ void scalar_to_complex() {
// OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4
// OGCG: store i32 0, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci, i32 0, i32 1), align 4
-// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %{{.*}} : !s32i), !cir.double
-// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %[[INT_TO_FP]] : !cir.double), !cir.complex<!cir.double>
+// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast int_to_float %{{.*}} : !s32i -> !cir.double
+// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast float_to_complex %[[INT_TO_FP]] : !cir.double -> !cir.complex<!cir.double>
// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!s32i>, !s32i
-// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(int_to_float, %[[TMP]] : !s32i), !cir.double
+// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast int_to_float %[[TMP]] : !s32i -> !cir.double
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double
// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !cir.double -> !cir.complex<!cir.double>
@@ -69,11 +69,11 @@ void scalar_to_complex() {
// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
// OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8
-// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %{{.*}} : !cir.double), !s32i
-// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %[[FP_TO_INT]] : !s32i), !cir.complex<!s32i>
+// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast float_to_int %{{.*}} : !cir.double -> !s32i
+// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast int_to_complex %[[FP_TO_INT]] : !s32i -> !cir.complex<!s32i>
// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.double>, !cir.double
-// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.double), !s32i
+// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast float_to_int %[[TMP]] : !cir.double -> !s32i
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i
// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !s32i -> !cir.complex<!s32i>
@@ -95,7 +95,7 @@ void scalar_to_complex_explicit() {
ci = (int _Complex)sd;
}
-// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %{{.*}} : !cir.double), !cir.complex<!cir.double>
+// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast float_to_complex %{{.*}} : !cir.double -> !cir.complex<!cir.double>
// CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.double>, !cir.double
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double
@@ -110,7 +110,7 @@ void scalar_to_complex_explicit() {
// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
// OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr @cd, i32 0, i32 1), align 8
-// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %{{.*}} : !s32i), !cir.complex<!s32i>
+// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast int_to_complex %{{.*}} : !s32i -> !cir.complex<!s32i>
// CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!s32i>, !s32i
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i
@@ -125,11 +125,11 @@ void scalar_to_complex_explicit() {
// OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4
// OGCG: store i32 0, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci, i32 0, i32 1), align 4
-// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %{{.*}} : !s32i), !cir.double
-// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %[[INT_TO_FP]] : !cir.double), !cir.complex<!cir.double>
+// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast int_to_float %{{.*}} : !s32i -> !cir.double
+// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast float_to_complex %[[INT_TO_FP]] : !cir.double -> !cir.complex<!cir.double>
// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!s32i>, !s32i
-// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(int_to_float, %[[TMP]] : !s32i), !cir.double
+// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast int_to_float %[[TMP]] : !s32i -> !cir.double
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double
// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !cir.double -> !cir.complex<!cir.double>
@@ -144,11 +144,11 @@ void scalar_to_complex_explicit() {
// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
// OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8
-// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %{{.*}} : !cir.double), !s32i
-// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %[[FP_TO_INT]] : !s32i), !cir.complex<!s32i>
+// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast float_to_int %{{.*}} : !cir.double -> !s32i
+// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast int_to_complex %[[FP_TO_INT]] : !s32i -> !cir.complex<!s32i>
// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.double>, !cir.double
-// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.double), !s32i
+// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast float_to_int %[[TMP]] : !cir.double -> !s32i
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i
// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !s32i -> !cir.complex<!s32i>
@@ -170,7 +170,7 @@ void complex_to_scalar() {
si = (int)cd;
}
-// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast(float_complex_to_real, %{{.*}} : !cir.complex<!cir.double>), !cir.double
+// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast float_complex_to_real %{{.*}} : !cir.complex<!cir.double> -> !cir.double
// CIR-AFTER: %{{.*}} = cir.complex.real %{{.*}} : !cir.complex<!cir.double> -> !cir.double
@@ -180,7 +180,7 @@ void complex_to_scalar() {
// OGCG: %[[REAL:.*]] = load double, ptr {{.*}}, align 8
// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
-// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast(int_complex_to_real, %{{.*}} : !cir.complex<!s32i>), !s32i
+// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast int_complex_to_real %{{.*}} : !cir.complex<!s32i> -> !s32i
// CIR-AFTER: %{{.*}} = cir.complex.real %{{.*}} : !cir.complex<!s32i> -> !s32i
@@ -190,11 +190,11 @@ void complex_to_scalar() {
// OGCG: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4
// OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4
-// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast(int_complex_to_real, %{{.*}} : !cir.complex<!s32i>), !s32i
-// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %[[INT_COMPLEX_TO_REAL]] : !s32i), !cir.double
+// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast int_complex_to_real %{{.*}} : !cir.complex<!s32i> -> !s32i
+// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast int_to_float %[[INT_COMPLEX_TO_REAL]] : !s32i -> !cir.double
// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!s32i> -> !s32i
-// CIR-AFTER-NEXT: %{{.*}} = cir.cast(int_to_float, %[[REAL]] : !s32i), !cir.double
+// CIR-AFTER-NEXT: %{{.*}} = cir.cast int_to_float %[[REAL]] : !s32i -> !cir.double
// LLVM: %[[REAL:.*]] = extractvalue { i32, i32 } %{{.+}}, 0
// LLVM-NEXT: %[[REAL_TO_DOUBLE:.*]] = sitofp i32 %[[REAL]] to double
@@ -204,11 +204,11 @@ void complex_to_scalar() {
// OGCG: %[[INT_TO_FP:.*]] = sitofp i32 %[[REAL]] to double
// OGCG: store double %[[INT_TO_FP]], ptr {{.*}}, align 8
-// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast(float_complex_to_real, %{{.*}} : !cir.complex<!cir.double>), !cir.double
-// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %[[FP_TO_COMPLEX_REAL]] : !cir.double), !s32i
+// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast float_complex_to_real %{{.*}} : !cir.complex<!cir.double> -> !cir.double
+// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast float_to_int %[[FP_TO_COMPLEX_REAL]] : !cir.double -> !s32i
// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!cir.double> -> !cir.double
-// CIR-AFTER-NEXT: %{{.*}} = cir.cast(float_to_int, %[[REAL]] : !cir.double), !s32i
+// CIR-AFTER-NEXT: %{{.*}} = cir.cast float_to_int %[[REAL]] : !cir.double -> !s32i
// LLVM: %[[REAL:.*]] = extractvalue { double, double } %{{.+}}, 0
// LLVM-NEXT: %[[REAL_TO_INT:.*]] = fptosi double %[[REAL]] to i32
@@ -223,12 +223,12 @@ void complex_to_bool() {
b = (bool)ci;
}
-// CIR-BEFORE: %[[FP_COMPLEX_TO_BOOL:.*]] = cir.cast(float_complex_to_bool, %{{.*}} : !cir.complex<!cir.double>), !cir.bool
+// CIR-BEFORE: %[[FP_COMPLEX_TO_BOOL:.*]] = cir.cast float_complex_to_bool %{{.*}} : !cir.complex<!cir.double> -> !cir.bool
// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!cir.double> -> !cir.double
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex<!cir.double> -> !cir.double
-// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast(float_to_bool, %[[REAL]] : !cir.double), !cir.bool
-// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast(float_to_bool, %[[IMAG]] : !cir.double), !cir.bool
+// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast float_to_bool %[[REAL]] : !cir.double -> !cir.bool
+// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast float_to_bool %[[IMAG]] : !cir.double -> !cir.bool
// CIR-AFTER-NEXT: %[[CONST_TRUE:.*]] = cir.const #true
// CIR-AFTER-NEXT: %{{.*}} = cir.select if %[[REAL_TO_BOOL]] then %[[CONST_TRUE]] else %[[IMAG_TO_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
@@ -248,12 +248,12 @@ void complex_to_bool() {
// OGCG: %[[BOOL_TO_INT:.*]] = zext i1 %[[COMPLEX_TO_BOOL]] to i8
// OGCG: store i8 %[[BOOL_TO_INT]], ptr {{.*}}, align 1
-// CIR-BEFORE: %[[INT_COMPLEX_TO_BOOL:.*]] = cir.cast(int_complex_to_bool, %{{.*}} : !cir.complex<!s32i>), !cir.bool
+// CIR-BEFORE: %[[INT_COMPLEX_TO_BOOL:.*]] = cir.cast int_complex_to_bool %{{.*}} : !cir.complex<!s32i> -> !cir.bool
// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!s32i> -> !s32i
// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex<!s32i> -> !s32i
-// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast(int_to_bool, %[[REAL]] : !s32i), !cir.bool
-// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast(int_to_bool, %[[IMAG]] : !s32i), !cir.bool
+// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast int_to_bool %[[REAL]] : !s32i -> !cir.bool
+// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast int_to_bool %[[IMAG]] : !s32i -> !cir.bool
// CIR-AFTER-NEXT: %[[CONST_TRUE:.*]] = cir.const #true
// CIR-AFTER-NEXT: %{{.+}} = cir.select if %[[REAL_TO_BOOL]] then %[[CONST_TRUE]] else %[[IMAG_TO_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
@@ -279,12 +279,12 @@ void complex_to_complex_cast() {
}
// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[FP_COMPLEX:.*]] = cir.cast(float_complex, %[[TMP]] : !cir.complex<!cir.float>), !cir.complex<!cir.double>
+// CIR-BEFORE: %[[FP_COMPLEX:.*]] = cir.cast float_complex %[[TMP]] : !cir.complex<!cir.float> -> !cir.complex<!cir.double>
// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[REAL_FP_CAST:.*]] = cir.cast(floating, %[[REAL]] : !cir.float), !cir.double
-// CIR-AFTER: %[[IMAG_FP_CAST:.*]] = cir.cast(floating, %[[IMAG]] : !cir.float), !cir.double
+// CIR-AFTER: %[[REAL_FP_CAST:.*]] = cir.cast floating %[[REAL]] : !cir.float -> !cir.double
+// CIR-AFTER: %[[IMAG_FP_CAST:.*]] = cir.cast floating %[[IMAG]] : !cir.float -> !cir.double
// CIR-AFTER: %{{.*}} = cir.complex.create %[[REAL_FP_CAST]], %[[IMAG_FP_CAST]] : !cir.double -> !cir.complex<!cir.double>
// LLVM: %[[REAL:.*]] = extractvalue { float, float } %{{.*}}, 0
@@ -303,12 +303,12 @@ void complex_to_complex_cast() {
// OGCG: store double %[[IMAG_FP_CAST]], ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8
// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.complex<!s16i>>, !cir.complex<!s16i>
-// CIR-BEFORE: %[[INT_COMPLEX:.*]] = cir.cast(int_complex, %[[TMP]] : !cir.complex<!s16i>), !cir.complex<!s32i>
+// CIR-BEFORE: %[[INT_COMPLEX:.*]] = cir.cast int_complex %[[TMP]] : !cir.complex<!s16i> -> !cir.complex<!s32i>
// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!s16i> -> !s16i
// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex<!s16i> -> !s16i
-// CIR-AFTER: %[[REAL_INT_CAST:.*]] = cir.cast(integral, %[[REAL]] : !s16i), !s32i
-// CIR-AFTER: %[[IMAG_INT_CAST:.*]] = cir.cast(integral, %[[IMAG]] : !s16i), !s32i
+// CIR-AFTER: %[[REAL_INT_CAST:.*]] = cir.cast integral %[[REAL]] : !s16i -> !s32i
+// CIR-AFTER: %[[IMAG_INT_CAST:.*]] = cir.cast integral %[[IMAG]] : !s16i -> !s32i
// CIR-AFTER: %{{.*}} = cir.complex.create %[[REAL_INT_CAST]], %[[IMAG_INT_CAST]] : !s32i -> !cir.complex<!s32i>
// LLVM: %[[REAL:.*]] = extractvalue { i16, i16 } %{{.*}}, 0
@@ -336,9 +336,9 @@ void lvalue_to_rvalue_bitcast() {
double _Complex b = __builtin_bit_cast(double _Complex, a);
}
-// CIR-BEFORE: %{{.*}} = cir.cast(bitcast, %{{.*}} : !cir.ptr<!rec_CX>), !cir.ptr<!cir.complex<!cir.double>>
+// CIR-BEFORE: %{{.*}} = cir.cast bitcast %{{.*}} : !cir.ptr<!rec_CX> -> !cir.ptr<!cir.complex<!cir.double>>
-// CIR-AFTER: %{{.*}} = cir.cast(bitcast, %{{.*}} : !cir.ptr<!rec_CX>), !cir.ptr<!cir.complex<!cir.double>>
+// CIR-AFTER: %{{.*}} = cir.cast bitcast %{{.*}} : !cir.ptr<!rec_CX> -> !cir.ptr<!cir.complex<!cir.double>>
// LLVM: %[[PTR_ADDR:.*]] = alloca %struct.CX, i64 1, align 8
// LLVM: %[[COMPLEX_ADDR:.*]] = alloca { double, double }, i64 1, align 8
@@ -361,9 +361,9 @@ void lvalue_bitcast() {
(double _Complex &)a = {};
}
-// CIR-BEFORE: %{{.*}} = cir.cast(bitcast, %{{.*}} : !cir.ptr<!rec_CX>), !cir.ptr<!cir.complex<!cir.double>>
+// CIR-BEFORE: %{{.*}} = cir.cast bitcast %{{.*}} : !cir.ptr<!rec_CX> -> !cir.ptr<!cir.complex<!cir.double>>
-// CIR-AFTER: %{{.*}} = cir.cast(bitcast, %{{.*}} : !cir.ptr<!rec_CX>), !cir.ptr<!cir.complex<!cir.double>>
+// CIR-AFTER: %{{.*}} = cir.cast bitcast %{{.*}} : !cir.ptr<!rec_CX> -> !cir.ptr<!cir.complex<!cir.double>>
// LLVM: %[[A_ADDR:.*]] = alloca %struct.CX, i64 1, align 8
// LLVM: store { double, double } zeroinitializer, ptr %[[A_ADDR]], align 8
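Throughout complex-cast.cpp, the CIR-BEFORE lines match the single compound cast, while the CIR-AFTER lines match the same conversion once it has been decomposed into per-component operations. For the int_complex widening checked above, the correspondence is (value names illustrative):

  // CIR-BEFORE: one compound cast
  //   %c = cir.cast int_complex %v : !cir.complex<!s16i> -> !cir.complex<!s32i>
  // CIR-AFTER: real/imag split, per-component integral casts, then re-create
  //   %re  = cir.complex.real %v : !cir.complex<!s16i> -> !s16i
  //   %im  = cir.complex.imag %v : !cir.complex<!s16i> -> !s16i
  //   %re2 = cir.cast integral %re : !s16i -> !s32i
  //   %im2 = cir.cast integral %im : !s16i -> !s32i
  //   %c   = cir.complex.create %re2, %im2 : !s32i -> !cir.complex<!s32i>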
diff --git a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
index 9909985..a5070f5 100644
--- a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
+++ b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
@@ -154,20 +154,20 @@ void foo3() {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[B_REAL_F32:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.f16), !cir.float
-// CIR: %[[B_IMAG_F32:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[B_REAL_F32:.*]] = cir.cast floating %[[B_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[B_IMAG_F32:.*]] = cir.cast floating %[[B_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[B_COMPLEX_F32:.*]] = cir.complex.create %[[B_REAL_F32]], %[[B_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[ADD_A_B:.*]] = cir.complex.add %[[B_COMPLEX_F32]], %[[A_COMPLEX_F32]] : !cir.complex<!cir.float>
// CIR: %[[ADD_REAL:.*]] = cir.complex.real %[[ADD_A_B]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[ADD_IMAG:.*]] = cir.complex.imag %[[ADD_A_B]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[ADD_REAL_F16:.*]] = cir.cast(floating, %[[ADD_REAL]] : !cir.float), !cir.f16
-// CIR: %[[ADD_IMAG_F16:.*]] = cir.cast(floating, %[[ADD_IMAG]] : !cir.float), !cir.f16
+// CIR: %[[ADD_REAL_F16:.*]] = cir.cast floating %[[ADD_REAL]] : !cir.float -> !cir.f16
+// CIR: %[[ADD_IMAG_F16:.*]] = cir.cast floating %[[ADD_IMAG]] : !cir.float -> !cir.f16
// CIR: %[[RESULT:.*]] = cir.complex.create %[[ADD_REAL_F16]], %[[ADD_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
@@ -712,14 +712,14 @@ void foo13() {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[B_REAL_F32:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.f16), !cir.float
-// CIR: %[[B_IMAG_F32:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[B_REAL_F32:.*]] = cir.cast floating %[[B_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[B_IMAG_F32:.*]] = cir.cast floating %[[B_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[B_COMPLEX_F32:.*]] = cir.complex.create %[[B_REAL_F32]], %[[B_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
@@ -729,8 +729,8 @@ void foo13() {
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[B_REAL_F32:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.f16), !cir.float
-// CIR: %[[B_IMAG_F32:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[B_REAL_F32:.*]] = cir.cast floating %[[B_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[B_IMAG_F32:.*]] = cir.cast floating %[[B_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[B_COMPLEX_F32:.*]] = cir.complex.create %[[B_REAL_F32]], %[[B_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[B_REAL_F32:.*]] = cir.complex.real %[[B_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[B_IMAG_F32:.*]] = cir.complex.imag %[[B_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
@@ -739,8 +739,8 @@ void foo13() {
// CIR: %[[RESULT:.*]] = cir.call @__divsc3(%[[B_REAL_F32]], %[[B_IMAG_F32]], %[[DIV_AB_REAL]], %[[DIV_AB_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
// CIR: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
-// CIR: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR: %[[RESULT_REAL_F16:.*]] = cir.cast floating %[[RESULT_REAL_F32]] : !cir.float -> !cir.f16
+// CIR: %[[RESULT_IMAG_F16:.*]] = cir.cast floating %[[RESULT_IMAG_F32]] : !cir.float -> !cir.f16
// CIR: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
// CIR: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
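Every _Float16 check in complex-compound-assignment.cpp follows the same promotion discipline: each half-precision component is widened to !cir.float with a `cir.cast floating`, the complex arithmetic runs at single precision, and the result components are truncated back to !cir.f16 before the store. Reduced to its skeleton (names illustrative):

  // %r16 : !cir.f16, a component extracted from the loaded complex value
  // %r32 = cir.cast floating %r16 : !cir.f16 -> !cir.float    (promote)
  //   ... single-precision cir.complex.add / cir.call @__divsc3 ...
  // %o16 = cir.cast floating %o32 : !cir.float -> !cir.f16    (truncate back)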
diff --git a/clang/test/CIR/CodeGen/complex-mul-div.cpp b/clang/test/CIR/CodeGen/complex-mul-div.cpp
index d493046..b306981 100644
--- a/clang/test/CIR/CodeGen/complex-mul-div.cpp
+++ b/clang/test/CIR/CodeGen/complex-mul-div.cpp
@@ -549,10 +549,10 @@ void foo3() {
// CIR-AFTER-PROMOTED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER-PROMOTED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER-PROMOTED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.float), !cir.double
-// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.float), !cir.double
-// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.float), !cir.double
-// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.double
+// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast floating %[[A_IMAG]] : !cir.float -> !cir.double
+// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast floating %[[B_REAL]] : !cir.float -> !cir.double
+// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast floating %[[B_IMAG]] : !cir.float -> !cir.double
// CIR-AFTER-PROMOTED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL_F64]], %[[B_REAL_F64]]) : !cir.double
// CIR-AFTER-PROMOTED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG_F64]], %[[B_IMAG_F64]]) : !cir.double
// CIR-AFTER-PROMOTED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL_F64]], %[[B_REAL_F64]]) : !cir.double
@@ -567,8 +567,8 @@ void foo3() {
// CIR-AFTER-PROMOTED: %[[RESULT_F64:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.double -> !cir.complex<!cir.double>
// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F64:.*]] = cir.complex.real %[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F64:.*]] = cir.complex.imag %[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
-// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast(floating, %[[RESULT_REAL_F64]] : !cir.double), !cir.float
-// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast(floating, %[[RESULT_IMAG_F64]] : !cir.double), !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast floating %[[RESULT_REAL_F64]] : !cir.double -> !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast floating %[[RESULT_IMAG_F64]] : !cir.double -> !cir.float
// CIR-AFTER-PROMOTED: %[[RESULT_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR-AFTER-PROMOTED: cir.store{{.*}} %[[RESULT_F32]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
@@ -1044,10 +1044,10 @@ void foo6() {
// CIR-AFTER-PROMOTED: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER-PROMOTED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER-PROMOTED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.float), !cir.double
-// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.float), !cir.double
-// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.float), !cir.double
-// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.double
+// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast floating %[[A_IMAG]] : !cir.float -> !cir.double
+// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast floating %[[B_REAL]] : !cir.float -> !cir.double
+// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast floating %[[B_IMAG]] : !cir.float -> !cir.double
// CIR-AFTER-PROMOTED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL_F64]], %[[B_REAL_F64]]) : !cir.double
// CIR-AFTER-PROMOTED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG_F64]], %[[B_IMAG_F64]]) : !cir.double
// CIR-AFTER-PROMOTED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL_F64]], %[[B_REAL_F64]]) : !cir.double
@@ -1062,8 +1062,8 @@ void foo6() {
// CIR-AFTER-PROMOTED: %[[RESULT_F64:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.double -> !cir.complex<!cir.double>
// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F64:.*]] = cir.complex.real %[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F64:.*]] = cir.complex.imag %[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
-// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast(floating, %[[RESULT_REAL_F64]] : !cir.double), !cir.float
-// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast(floating, %[[RESULT_IMAG_F64]] : !cir.double), !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast floating %[[RESULT_REAL_F64]] : !cir.double -> !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast floating %[[RESULT_IMAG_F64]] : !cir.double -> !cir.float
// CIR-AFTER-PROMOTED: %[[RESULT_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR-AFTER-PROMOTED: cir.store{{.*}} %[[RESULT_F32]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
diff --git a/clang/test/CIR/CodeGen/complex-unary.cpp b/clang/test/CIR/CodeGen/complex-unary.cpp
index d79199f..a8e434b 100644
--- a/clang/test/CIR/CodeGen/complex-unary.cpp
+++ b/clang/test/CIR/CodeGen/complex-unary.cpp
@@ -380,9 +380,9 @@ void foo9() {
// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
-// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex<!cir.f16>), !cir.complex<!cir.float>
+// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast float_complex %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.complex<!cir.float>
// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(plus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex<!cir.float>), !cir.complex<!cir.f16>
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast float_complex %[[RESULT]] : !cir.complex<!cir.float> -> !cir.complex<!cir.f16>
// CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
@@ -390,8 +390,8 @@ void foo9() {
// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
// CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
@@ -400,8 +400,8 @@ void foo9() {
// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
-// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast floating %[[RESULT_REAL_F32]] : !cir.float -> !cir.f16
+// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast floating %[[RESULT_IMAG_F32]] : !cir.float -> !cir.f16
// CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
// CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
@@ -445,9 +445,9 @@ void foo10() {
// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
-// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex<!cir.f16>), !cir.complex<!cir.float>
+// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast float_complex %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.complex<!cir.float>
// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(minus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex<!cir.float>), !cir.complex<!cir.f16>
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast float_complex %[[RESULT]] : !cir.complex<!cir.float> -> !cir.complex<!cir.f16>
// CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
@@ -455,8 +455,8 @@ void foo10() {
// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
// CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
@@ -465,8 +465,8 @@ void foo10() {
// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
-// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast floating %[[RESULT_REAL_F32]] : !cir.float -> !cir.f16
+// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast floating %[[RESULT_IMAG_F32]] : !cir.float -> !cir.f16
// CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
// CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp
index 4c396d3..ae69b24 100644
--- a/clang/test/CIR/CodeGen/complex.cpp
+++ b/clang/test/CIR/CodeGen/complex.cpp
@@ -612,7 +612,7 @@ void foo24() {
// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!cir.complex<!s32i> x 2>, !cir.ptr<!cir.array<!cir.complex<!s32i> x 2>>, ["arr"]
// CIR: %[[RESULT:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["r", init]
// CIR: %[[IDX:.*]] = cir.const #cir.int<1> : !s32i
-// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!cir.complex<!s32i> x 2>>), !cir.ptr<!cir.complex<!s32i>>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!cir.complex<!s32i> x 2>> -> !cir.ptr<!cir.complex<!s32i>>
// CIR: %[[RESULT_VAL:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr<!cir.complex<!s32i>>, %[[IDX]] : !s32i), !cir.ptr<!cir.complex<!s32i>>
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[RESULT_VAL]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
// CIR: cir.store{{.*}} %[[TMP]], %[[RESULT]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
@@ -938,11 +938,11 @@ void foo35() {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_REAL_F16:.*]] = cir.cast(floating, %[[A_REAL_F32]] : !cir.float), !cir.f16
+// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL_F32]] : !cir.float -> !cir.f16
// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[REAL_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
@@ -975,11 +975,11 @@ void foo36() {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_IMAG_F16:.*]] = cir.cast(floating, %[[A_IMAG_F32]] : !cir.float), !cir.f16
+// CIR: %[[A_IMAG_F16:.*]] = cir.cast floating %[[A_IMAG_F32]] : !cir.float -> !cir.f16
// CIR: cir.store{{.*}} %[[A_IMAG_F16]], %[[IMAG_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
@@ -1102,11 +1102,11 @@ void atomic_complex_type() {
// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
// CIR: %[[ATOMIC_TMP_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["atomic-temp"]
-// CIR: %[[A_PTR:.*]] = cir.cast(bitcast, %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>), !cir.ptr<!u64i>
-// CIR: %[[ATOMIC_TMP_PTR:.*]] = cir.cast(bitcast, %[[ATOMIC_TMP_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>), !cir.ptr<!u64i>
+// CIR: %[[A_PTR:.*]] = cir.cast bitcast %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>> -> !cir.ptr<!u64i>
+// CIR: %[[ATOMIC_TMP_PTR:.*]] = cir.cast bitcast %[[ATOMIC_TMP_ADDR]] : !cir.ptr<!cir.complex<!cir.float>> -> !cir.ptr<!u64i>
// CIR: %[[TMP_A_ATOMIC:.*]] = cir.load{{.*}} atomic(relaxed) %[[A_PTR]] : !cir.ptr<!u64i>, !u64i
// CIR: cir.store{{.*}} %[[TMP_A_ATOMIC]], %[[ATOMIC_TMP_PTR]] : !u64i, !cir.ptr<!u64i>
-// CIR: %[[TMP_ATOMIC_PTR:.*]] = cir.cast(bitcast, %[[ATOMIC_TMP_PTR]] : !cir.ptr<!u64i>), !cir.ptr<!cir.complex<!cir.float>>
+// CIR: %[[TMP_ATOMIC_PTR:.*]] = cir.cast bitcast %[[ATOMIC_TMP_PTR]] : !cir.ptr<!u64i> -> !cir.ptr<!cir.complex<!cir.float>>
// CIR: %[[TMP_ATOMIC:.*]] = cir.load{{.*}} %[[TMP_ATOMIC_PTR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR: cir.store{{.*}} %[[TMP_ATOMIC]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
@@ -1140,7 +1140,8 @@ void real_on_scalar_glvalue() {
// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.float>, !cir.float
-// CIR: cir.store{{.*}} %[[TMP_A]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.float -> !cir.float
+// CIR: cir.store{{.*}} %[[A_REAL]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
// LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
@@ -1178,8 +1179,9 @@ void real_on_scalar_with_type_promotion() {
// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
-// CIR: %[[TMP_A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float
-// CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A_F32]] : !cir.float), !cir.f16
+// CIR: %[[TMP_A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A_F32]] : !cir.float -> !cir.float
+// CIR: %[[TMP_A_F16:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.f16
// CIR: cir.store{{.*}} %[[TMP_A_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2
@@ -1204,7 +1206,7 @@ void imag_on_scalar_with_type_promotion() {
// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
-// CIR: %[[CONST_ZERO_F16:.*]] = cir.cast(floating, %[[CONST_ZERO]] : !cir.float), !cir.f16
+// CIR: %[[CONST_ZERO_F16:.*]] = cir.cast floating %[[CONST_ZERO]] : !cir.float -> !cir.f16
// CIR: cir.store{{.*}} %[[CONST_ZERO_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2
@@ -1244,11 +1246,12 @@ void real_on_scalar_from_real_with_type_promotion() {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_REAL_F16:.*]] = cir.cast(floating, %[[A_REAL_F32]] : !cir.float), !cir.f16
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[A_REAL_F32]] : !cir.float -> !cir.float
+// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.f16
// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
@@ -1281,12 +1284,13 @@ void real_on_scalar_from_imag_with_type_promotion() {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_IMAG_F16:.*]] = cir.cast(floating, %[[A_IMAG_F32]] : !cir.float), !cir.f16
-// CIR: cir.store{{.*}} %[[A_IMAG_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
+// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_IMAG_F32]] : !cir.float -> !cir.float
+// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL_F32]] : !cir.float -> !cir.f16
+// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
// LLVM: %[[B_ADDR]] = alloca half, i64 1, align 2
diff --git a/clang/test/CIR/CodeGen/cxx-default-init.cpp b/clang/test/CIR/CodeGen/cxx-default-init.cpp
index 06d3a27..b3d706f 100644
--- a/clang/test/CIR/CodeGen/cxx-default-init.cpp
+++ b/clang/test/CIR/CodeGen/cxx-default-init.cpp
@@ -33,7 +33,7 @@ struct ZeroInit {
// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
// CIR: cir.store{{.*}} %[[ZERO]], %[[P_B]]
// CIR: %[[ARR:.*]] = cir.get_member %[[THIS]][2] {name = "arr"}
-// CIR: %[[ARR_BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 4>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_BEGIN:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 4>> -> !cir.ptr<!s32i>
// CIR: cir.store{{.*}} %[[ARR_BEGIN]], %[[ITER]]
// CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !s64i
// CIR: %[[END:.*]] = cir.ptr_stride(%[[ARR_BEGIN]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i)
@@ -139,7 +139,7 @@ struct ValueInit {
// CIR: %[[THREE:.*]] = cir.const #cir.int<3> : !s32i
// CIR: cir.store{{.*}} %[[THREE]], %[[P_B]]
// CIR: %[[ARR:.*]] = cir.get_member %[[THIS]][2] {name = "arr"}
-// CIR: %[[ARR_BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!s32i x 4>>), !cir.ptr<!s32i>
+// CIR: %[[ARR_BEGIN:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 4>> -> !cir.ptr<!s32i>
// CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !s32i
// CIR: cir.store{{.*}} %[[FOUR]], %[[ARR_BEGIN]]
// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i
@@ -169,7 +169,7 @@ struct ValueInit {
// CIR: cir.store{{.*}} %[[FOUR_FIVEI]], %[[C]]
// CIR: %[[BF:.*]] = cir.get_member %[[THIS]][4] {name = "bf"}
// CIR: %[[FF:.*]] = cir.const #cir.int<255> : !s32i
-// CIR: %[[FF_CAST:.*]] = cir.cast(integral, %[[FF]] : !s32i), !u32i
+// CIR: %[[FF_CAST:.*]] = cir.cast integral %[[FF]] : !s32i -> !u32i
// CIR: %[[BF_VAL:.*]] = cir.set_bitfield{{.*}} (#bfi_bf, %[[BF]] : !cir.ptr<!u8i>, %[[FF_CAST]] : !u32i)
// LLVM: define{{.*}} void @_ZN9ValueInitC2Ev(ptr %[[THIS_ARG:.*]])
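The bitfield store above packs a 32-bit constant into the field's narrow storage unit. A hypothetical member shape consistent with the checks (the real ValueInit definition sits outside the hunk; the width and initializer form are assumed):

struct ValueInitLike {
  unsigned bf : 8 = 255;  // C++20 default member initializer, assumed
};
// 255 is materialized as !s32i, integral-cast to !u32i, then packed by
// cir.set_bitfield into the byte-sized (!u8i) storage behind `bf`.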
diff --git a/clang/test/CIR/CodeGen/delegating-ctor.cpp b/clang/test/CIR/CodeGen/delegating-ctor.cpp
index 73ee6b7..c95ecf4 100644
--- a/clang/test/CIR/CodeGen/delegating-ctor.cpp
+++ b/clang/test/CIR/CodeGen/delegating-ctor.cpp
@@ -116,23 +116,23 @@ Derived::Derived(const void *inVoid) { squawk(); }
// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
// CIR: %[[VPTR_GLOBAL_ADDR:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
-// CIR: %[[VPTR_PTR:.*]] = cir.cast(bitcast, %[[VPTR_GLOBAL_ADDR]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[VPTR_PTR:.*]] = cir.cast bitcast %[[VPTR_GLOBAL_ADDR]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_PTR]] : !cir.ptr<!cir.vptr>, !cir.vptr
// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!cir.vptr>
// CIR: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
// CIR: %[[VPTR_BASE_ADDR:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
-// CIR: %[[VPTR_BASE_PTR:.*]] = cir.cast(bitcast, %[[VPTR_BASE_ADDR]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[VPTR_BASE_PTR:.*]] = cir.cast bitcast %[[VPTR_BASE_ADDR]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR: %[[VPTR_BASE:.*]] = cir.load{{.*}} %[[VPTR_BASE_PTR]] : !cir.ptr<!cir.vptr>, !cir.vptr
// CIR: %[[VPTR_DERIVED_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!cir.vptr>
// CIR: %[[VPTR_DERIVED:.*]] = cir.load{{.*}} %[[VPTR_DERIVED_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
-// CIR: %[[VPTR_DERIVED_AS_I8PTR:.*]] = cir.cast(bitcast, %[[VPTR_DERIVED]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR: %[[VPTR_DERIVED_AS_I8PTR:.*]] = cir.cast bitcast %[[VPTR_DERIVED]] : !cir.vptr -> !cir.ptr<!u8i>
// CIR: %[[BASE_LOC_OFFSET:.*]] = cir.const #cir.int<-32> : !s64i
// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.ptr_stride(%[[VPTR_DERIVED_AS_I8PTR]] : !cir.ptr<!u8i>, %[[BASE_LOC_OFFSET]] : !s64i), !cir.ptr<!u8i>
-// CIR: %[[BASE_OFFSET_I64PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR: %[[BASE_OFFSET_I64PTR:.*]] = cir.cast bitcast %[[BASE_OFFSET_PTR]] : !cir.ptr<!u8i> -> !cir.ptr<!s64i>
// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_I64PTR]] : !cir.ptr<!s64i>, !s64i
-// CIR: %[[THIS_AS_I8PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_Derived>), !cir.ptr<!u8i>
+// CIR: %[[THIS_AS_I8PTR:.*]] = cir.cast bitcast %[[THIS]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!u8i>
// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_AS_I8PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
-// CIR: %[[BASE_AS_I8PTR:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_Derived>
+// CIR: %[[BASE_AS_I8PTR:.*]] = cir.cast bitcast %[[BASE_PTR]] : !cir.ptr<!u8i> -> !cir.ptr<!rec_Derived>
// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_AS_I8PTR]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!cir.vptr>
// CIR: cir.store{{.*}} %[[VPTR_BASE]], %[[BASE_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
// CIR: %[[VPTR_BASE_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!cir.vptr>
diff --git a/clang/test/CIR/CodeGen/delete.cpp b/clang/test/CIR/CodeGen/delete.cpp
index f21d203..69640aa 100644
--- a/clang/test/CIR/CodeGen/delete.cpp
+++ b/clang/test/CIR/CodeGen/delete.cpp
@@ -21,7 +21,7 @@ void test_sized_delete(SizedDelete *x) {
// CIR: cir.func dso_local @_Z17test_sized_deleteP11SizedDelete
// CIR: %[[X:.*]] = cir.load{{.*}} %{{.*}}
-// CIR: %[[X_CAST:.*]] = cir.cast(bitcast, %[[X]] : !cir.ptr<!rec_SizedDelete>), !cir.ptr<!void>
+// CIR: %[[X_CAST:.*]] = cir.cast bitcast %[[X]] : !cir.ptr<!rec_SizedDelete> -> !cir.ptr<!void>
// CIR: %[[OBJ_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CIR: cir.call @_ZN11SizedDeletedlEPvm(%[[X_CAST]], %[[OBJ_SIZE]]) nothrow : (!cir.ptr<!void>, !u64i) -> ()
@@ -62,7 +62,7 @@ Container::~Container() { delete contents; }
// CIR: %[[CONTENTS_PTR_ADDR:.*]] = cir.get_member %[[THIS]][0] {name = "contents"} : !cir.ptr<!rec_Container> -> !cir.ptr<!cir.ptr<!rec_Contents>>
// CIR: %[[CONTENTS_PTR:.*]] = cir.load{{.*}} %[[CONTENTS_PTR_ADDR]]
// CIR: cir.call @_ZN8ContentsD2Ev(%[[CONTENTS_PTR]]) nothrow : (!cir.ptr<!rec_Contents>) -> ()
-// CIR: %[[CONTENTS_CAST:.*]] = cir.cast(bitcast, %[[CONTENTS_PTR]] : !cir.ptr<!rec_Contents>), !cir.ptr<!void>
+// CIR: %[[CONTENTS_CAST:.*]] = cir.cast bitcast %[[CONTENTS_PTR]] : !cir.ptr<!rec_Contents> -> !cir.ptr<!void>
// CIR: %[[OBJ_SIZE:.*]] = cir.const #cir.int<1> : !u64i
// CIR: cir.call @_ZdlPvm(%[[CONTENTS_CAST]], %[[OBJ_SIZE]]) nothrow : (!cir.ptr<!void>, !u64i) -> ()
diff --git a/clang/test/CIR/CodeGen/destructors.cpp b/clang/test/CIR/CodeGen/destructors.cpp
index fde0732..1ede156 100644
--- a/clang/test/CIR/CodeGen/destructors.cpp
+++ b/clang/test/CIR/CodeGen/destructors.cpp
@@ -64,7 +64,7 @@ void test_array_destructor() {
// CIR: cir.func dso_local @_Z21test_array_destructorv()
// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!rec_array_element x 5>, !cir.ptr<!cir.array<!rec_array_element x 5>>, ["arr", init]
// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!rec_array_element>, !cir.ptr<!cir.ptr<!rec_array_element>>, ["arrayinit.temp", init]
-// CIR: %[[BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!rec_array_element x 5>>)
+// CIR: %[[BEGIN:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!rec_array_element x 5>>
// CIR: cir.store{{.*}} %[[BEGIN]], %[[ARR_PTR]]
// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s64i
// CIR: %[[ARR_END:.*]] = cir.ptr_stride(%[[BEGIN]] : !cir.ptr<!rec_array_element>, %[[FIVE]] : !s64i)
@@ -80,7 +80,7 @@ void test_array_destructor() {
// CIR: cir.condition(%[[CMP]])
// CIR: }
// CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !u64i
-// CIR: %[[BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr<!cir.array<!rec_array_element x 5>>)
+// CIR: %[[BEGIN:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!rec_array_element x 5>>
// CIR: %[[END:.*]] = cir.ptr_stride(%[[BEGIN]] : !cir.ptr<!rec_array_element>, %[[FOUR]] : !u64i)
// CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr<!rec_array_element>, !cir.ptr<!cir.ptr<!rec_array_element>>, ["__array_idx"]
// CIR: cir.store %[[END]], %[[ARR_PTR]]
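For orientation, a plausible source behind these checks (assumed; only the element count and the record name are visible):

struct array_element {
  ~array_element();  // non-trivial destructor assumed
};
void test_array_destructor() {
  array_element arr[5];
}  // destructors run in reverse order at scope exit, which is why the cleanup
   // loop above starts at begin + 4 (the last element) and walks backwards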
diff --git a/clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp b/clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp
index 930b0a9..d9ccd27 100644
--- a/clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp
+++ b/clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp
@@ -70,7 +70,7 @@ void write8_1() {
// CIR-LABEL: @_Z8write8_1v
// CIR: [[CONST3:%.*]] = cir.const #cir.int<3> : !s32i
-// CIR: [[INT3:%.*]] = cir.cast(integral, [[CONST3]] : !s32i), !u32i
+// CIR: [[INT3:%.*]] = cir.cast integral [[CONST3]] : !s32i -> !u32i
// CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f3"} : !cir.ptr<!rec_S1> -> !cir.ptr<!u8i>
// CIR: cir.set_bitfield align(1) (#bfi_f3, [[MEMBER]] : !cir.ptr<!u8i>, [[INT3]] : !u32i) -> !u32i
@@ -116,7 +116,7 @@ void write8_2() {
// CIR-LABEL: @_Z8write8_2v
// CIR: [[CONST3:%.*]] = cir.const #cir.int<3> : !s32i
-// CIR: [[INT3:%.*]] = cir.cast(integral, [[CONST3]] : !s32i), !u32i
+// CIR: [[INT3:%.*]] = cir.cast integral [[CONST3]] : !s32i -> !u32i
// CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[2] {name = "f5"} : !cir.ptr<!rec_S1> -> !cir.ptr<!u16i>
// CIR: cir.set_bitfield align(2) (#bfi_f5, %3 : !cir.ptr<!u16i>, {{.*}} : !u32i) -> !u32i
@@ -141,7 +141,7 @@ unsigned read16_1() {
// CIR-LABEL: @_Z8read16_1v
// CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[0] {name = "f1"} : !cir.ptr<!rec_S2> -> !cir.ptr<!u16i>
// CIR: [[BITFI:%.*]] = cir.get_bitfield align(8) (#bfi_f1, [[MEMBER]] : !cir.ptr<!u16i>) -> !u64i
-// CIR: [[BFCAST:%.*]] = cir.cast(integral, [[BITFI]] : !u64i), !u32i
+// CIR: [[BFCAST:%.*]] = cir.cast integral [[BITFI]] : !u64i -> !u32i
// CIR: cir.store [[BFCAST]], {{.*}} : !u32i, !cir.ptr<!u32i>
// CIR: [[RET:%.*]] = cir.load {{.*}} : !cir.ptr<!u32i>, !u32i
// CIR: cir.return [[RET]] : !u32i
@@ -167,7 +167,7 @@ unsigned read16_2() {
// CIR-LABEL: @_Z8read16_2v
// CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f2"} : !cir.ptr<!rec_S2> -> !cir.ptr<!u16i>
// CIR: [[BITFI:%.*]] = cir.get_bitfield align(2) (#bfi_f2, [[MEMBER]] : !cir.ptr<!u16i>) -> !u64i
-// CIR: [[BFCAST:%.*]] = cir.cast(integral, [[BITFI]] : !u64i), !u32i
+// CIR: [[BFCAST:%.*]] = cir.cast integral [[BITFI]] : !u64i -> !u32i
// CIR: cir.store [[BFCAST]], {{.*}} : !u32i, !cir.ptr<!u32i>
// CIR: [[RET:%.*]] = cir.load {{.*}} : !cir.ptr<!u32i>, !u32i
// CIR: cir.return [[RET]] : !u32i
@@ -192,7 +192,7 @@ void write16_1() {
// CIR-LABEL: @_Z9write16_1v
// CIR: [[CONST5:%.*]] = cir.const #cir.int<5> : !s32i
-// CIR: [[INT5:%.*]] = cir.cast(integral, [[CONST5]] : !s32i), !u64i
+// CIR: [[INT5:%.*]] = cir.cast integral [[CONST5]] : !s32i -> !u64i
// CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[0] {name = "f1"} : !cir.ptr<!rec_S2> -> !cir.ptr<!u16i>
// CIR: cir.set_bitfield align(8) (#bfi_f1, [[MEMBER]] : !cir.ptr<!u16i>, [[INT5]] : !u64i) -> !u64i
// CIR: cir.return
@@ -212,7 +212,7 @@ void write16_2() {
// CIR-LABEL: @_Z9write16_2v
// CIR: [[CONST5:%.*]] = cir.const #cir.int<5> : !s32i
-// CIR: [[INT5:%.*]] = cir.cast(integral, [[CONST5]] : !s32i), !u64i
+// CIR: [[INT5:%.*]] = cir.cast integral [[CONST5]] : !s32i -> !u64i
// CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f2"} : !cir.ptr<!rec_S2> -> !cir.ptr<!u16i>
// CIR: cir.set_bitfield align(2) (#bfi_f2, [[MEMBER]] : !cir.ptr<!u16i>, {{.*}} : !u64i) -> !u64i
// CIR: cir.return
@@ -232,7 +232,7 @@ unsigned read32_1() {
// CIR-LABEL: @_Z8read32_1v
// CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f3"} : !cir.ptr<!rec_S3> -> !cir.ptr<!u32i>
// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_f3_1, [[MEMBER]] : !cir.ptr<!u32i>) -> !u64i
-// CIR: [[BFCAST:%.*]] = cir.cast(integral, [[BITFI]] : !u64i), !u32i
+// CIR: [[BFCAST:%.*]] = cir.cast integral [[BITFI]] : !u64i -> !u32i
// CIR: cir.store [[BFCAST]], {{.*}} : !u32i, !cir.ptr<!u32i>
// CIR: [[RET:%.*]] = cir.load {{.*}} : !cir.ptr<!u32i>, !u32i
// CIR: cir.return [[RET]] : !u32i
@@ -257,7 +257,7 @@ void write32_1() {
// CIR-LABEL: @_Z9write32_1v
// CIR: [[CONST5:%.*]] = cir.const #cir.int<5> : !s32i
-// CIR: [[INT5:%.*]] = cir.cast(integral, [[CONST5]] : !s32i), !u64i
+// CIR: [[INT5:%.*]] = cir.cast integral [[CONST5]] : !s32i -> !u64i
// CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f3"} : !cir.ptr<!rec_S3> -> !cir.ptr<!u32i>
// CIR: cir.set_bitfield align(4) (#bfi_f3_1, [[MEMBER]] : !cir.ptr<!u32i>, [[INT5]] : !u64i) -> !u64i
// CIR: cir.return
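These functions exercise fine-grained bitfield access (-ffine-grained-bitfield-accesses, inferred from the test name), where each field is loaded or stored through its own narrow storage unit instead of one wide access covering the whole run. A hedged sketch for write32_1 (field widths are guesses; the 4-byte unit is implied by align(4)):

struct S3Like {
  unsigned f1 : 17;  // widths assumed for illustration
  unsigned f3 : 24;
};
void write32_1_like(S3Like &s) {
  s.f3 = 5;  // const 5 : !s32i, integral cast to !u64i, then cir.set_bitfield
}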
diff --git a/clang/test/CIR/CodeGen/if.cpp b/clang/test/CIR/CodeGen/if.cpp
index daaec8a..823539b 100644
--- a/clang/test/CIR/CodeGen/if.cpp
+++ b/clang/test/CIR/CodeGen/if.cpp
@@ -74,7 +74,7 @@ void if1(int a) {
// CIR: cir.func{{.*}} @_Z3if1i(%arg0: !s32i loc({{.*}}))
// CIR: cir.scope {
// CIR: %3 = cir.load{{.*}} %0 : !cir.ptr<!s32i>, !s32i
-// CIR: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool
+// CIR: %4 = cir.cast int_to_bool %3 : !s32i -> !cir.bool
// CIR-NEXT: cir.if %4 {
// CIR-NEXT: %5 = cir.const #cir.int<3> : !s32i
// CIR-NEXT: cir.store{{.*}} %5, %1 : !s32i, !cir.ptr<!s32i>
@@ -141,7 +141,7 @@ void if2(int a, bool b, bool c) {
// CIR: cir.func{{.*}} @_Z3if2ibb(%arg0: !s32i loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}}))
// CIR: cir.scope {
// CIR: %5 = cir.load{{.*}} %0 : !cir.ptr<!s32i>, !s32i
-// CIR: %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool
+// CIR: %6 = cir.cast int_to_bool %5 : !s32i -> !cir.bool
// CIR: cir.if %6 {
// CIR: %7 = cir.const #cir.int<3> : !s32i
// CIR: cir.store{{.*}} %7, %3 : !s32i, !cir.ptr<!s32i>
@@ -267,7 +267,7 @@ int if_init() {
// CIR: %[[CONST42:.*]] = cir.const #cir.int<42> : !s32i
// CIR: cir.store{{.*}} %[[CONST42]], %[[X]] : !s32i, !cir.ptr<!s32i>
// CIR: %[[X_VAL:.*]] = cir.load{{.*}} %[[X]] : !cir.ptr<!s32i>, !s32i
-// CIR: %[[COND:.*]] = cir.cast(int_to_bool, %[[X_VAL]] : !s32i), !cir.bool
+// CIR: %[[COND:.*]] = cir.cast int_to_bool %[[X_VAL]] : !s32i -> !cir.bool
// CIR: cir.if %[[COND]] {
// CIR: %[[X_IF:.*]] = cir.load{{.*}} %[[X]] : !cir.ptr<!s32i>, !s32i
// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
diff --git a/clang/test/CIR/CodeGen/int-to-bool.cpp b/clang/test/CIR/CodeGen/int-to-bool.cpp
index ad36af4..97b799b 100644
--- a/clang/test/CIR/CodeGen/int-to-bool.cpp
+++ b/clang/test/CIR/CodeGen/int-to-bool.cpp
@@ -10,7 +10,7 @@ bool f1(unsigned char c) {
}
// CIR: cir.func{{.*}} @_Z2f1h
-// CIR: cir.cast(int_to_bool, %{{.*}} : !u8i), !cir.bool
+// CIR: cir.cast int_to_bool %{{.*}} : !u8i -> !cir.bool
// Note: The full zext/store/load/trunc sequence is checked here to show what
// CIR is being lowered to. There's no need to check it for every function since
@@ -33,7 +33,7 @@ bool f2(short s) {
}
// CIR: cir.func{{.*}} @_Z2f2s
-// CIR: cir.cast(int_to_bool, %{{.*}} : !s16i), !cir.bool
+// CIR: cir.cast int_to_bool %{{.*}} : !s16i -> !cir.bool
// LLVM: define{{.*}} i1 @_Z2f2s
// LLVM: %[[CMP:.*]] = icmp ne i16 %4, 0
@@ -48,7 +48,7 @@ bool f3(unsigned u) {
}
// CIR: cir.func{{.*}} @_Z2f3j
-// CIR: cir.cast(int_to_bool, %{{.*}} : !u32i), !cir.bool
+// CIR: cir.cast int_to_bool %{{.*}} : !u32i -> !cir.bool
// LLVM: define{{.*}} i1 @_Z2f3j
// LLVM: %[[CMP:.*]] = icmp ne i32 %4, 0
@@ -63,7 +63,7 @@ bool f4(long l) {
}
// CIR: cir.func{{.*}} @_Z2f4l
-// CIR: cir.cast(int_to_bool, %{{.*}} : !s64i), !cir.bool
+// CIR: cir.cast int_to_bool %{{.*}} : !s64i -> !cir.bool
// LLVM: define{{.*}} i1 @_Z2f4l
// LLVM: %[[CMP:.*]] = icmp ne i64 %4, 0
diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp
index 0eba0bb..b30589c 100644
--- a/clang/test/CIR/CodeGen/loop.cpp
+++ b/clang/test/CIR/CodeGen/loop.cpp
@@ -205,10 +205,10 @@ void l4() {
// CIR: %[[N_ADDR:.*]] = cir.alloca {{.*}} ["n", init]
// CIR: cir.store{{.*}} %[[A_ADDR]], %[[RANGE_ADDR]]
// CIR: %[[RANGE_LOAD:.*]] = cir.load{{.*}} %[[RANGE_ADDR]]
-// CIR: %[[RANGE_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[RANGE_LOAD]] : {{.*}})
+// CIR: %[[RANGE_CAST:.*]] = cir.cast array_to_ptrdecay %[[RANGE_LOAD]] : {{.*}}
// CIR: cir.store{{.*}} %[[RANGE_CAST]], %[[BEGIN_ADDR]]
// CIR: %[[BEGIN:.*]] = cir.load{{.*}} %[[RANGE_ADDR]]
-// CIR: %[[BEGIN_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[BEGIN]] : {{.*}})
+// CIR: %[[BEGIN_CAST:.*]] = cir.cast array_to_ptrdecay %[[BEGIN]] : {{.*}}
// CIR: %[[TEN:.*]] = cir.const #cir.int<10>
// CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[BEGIN_CAST]] : {{.*}}, %[[TEN]] : {{.*}})
// CIR: cir.store{{.*}} %[[END_PTR]], %[[END_ADDR]]
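The __begin1/__end1 allocas above are the compiler-synthesized variables of a desugared range-for. A plausible source (the element type is hidden behind the {{.*}} patterns, so int is assumed):

void use(int);  // hypothetical sink
void l4_like() {
  int a[10];    // the cir.const #cir.int<10> above bounds __end1
  for (int n : a)
    use(n);     // __begin1 = decay(a); __end1 = __begin1 + 10
}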
@@ -312,7 +312,7 @@ void l5() {
// CIR: %[[BEGIN_ADDR:.*]] = cir.alloca {{.*}} ["__begin1", init]
// CIR: %[[END_ADDR:.*]] = cir.alloca {{.*}} ["__end1", init]
// CIR: %[[X_ADDR:.*]] = cir.alloca {{.*}} ["x", init]
-// CIR: %[[ARR_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_ADDR]] : {{.*}})
+// CIR: %[[ARR_CAST:.*]] = cir.cast array_to_ptrdecay %[[ARR_ADDR]] : {{.*}}
// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CIR: cir.store{{.*}} %[[ONE]], %[[ARR_CAST]]
// CIR: %[[OFFSET1:.*]] = cir.const #cir.int<1> : !s64i
@@ -329,10 +329,10 @@ void l5() {
// CIR: cir.store{{.*}} %[[FOUR]], %[[STRIDE3]]
// CIR: cir.store{{.*}} %[[ARR_ADDR]], %[[RANGE_ADDR]]
// CIR: %[[RANGE_LOAD:.*]] = cir.load{{.*}} %[[RANGE_ADDR]]
-// CIR: %[[RANGE_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[RANGE_LOAD]] : {{.*}})
+// CIR: %[[RANGE_CAST:.*]] = cir.cast array_to_ptrdecay %[[RANGE_LOAD]] : {{.*}}
// CIR: cir.store{{.*}} %[[RANGE_CAST]], %[[BEGIN_ADDR]]
// CIR: %[[BEGIN:.*]] = cir.load{{.*}} %[[RANGE_ADDR]]
-// CIR: %[[BEGIN_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[BEGIN]] : {{.*}})
+// CIR: %[[BEGIN_CAST:.*]] = cir.cast array_to_ptrdecay %[[BEGIN]] : {{.*}}
// CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !s64i
// CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[BEGIN_CAST]] : {{.*}}, %[[FOUR]] : {{.*}})
// CIR: cir.store{{.*}} %[[END_PTR]], %[[END_ADDR]]
@@ -445,7 +445,7 @@ void test_do_while_false() {
// CIR-NEXT: cir.yield
// CIR-NEXT: } while {
// CIR-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CIR-NEXT: %[[FALSE:.*]] = cir.cast(int_to_bool, %[[ZERO]] : !s32i), !cir.bool
+// CIR-NEXT: %[[FALSE:.*]] = cir.cast int_to_bool %[[ZERO]] : !s32i -> !cir.bool
// CIR-NEXT: cir.condition(%[[FALSE]])
// LLVM: define{{.*}} void @_Z19test_do_while_falsev()
diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp
index b14bf07..91dae3f 100644
--- a/clang/test/CIR/CodeGen/new.cpp
+++ b/clang/test/CIR/CodeGen/new.cpp
@@ -22,15 +22,15 @@ void test_basic_new() {
// CHECK: %[[PD_ADDR:.*]] = cir.alloca !cir.ptr<!cir.double>, !cir.ptr<!cir.ptr<!cir.double>>, ["pd", init]
// CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8>
// CHECK: %[[NEW_S:.*]] = cir.call @_Znwm(%[[EIGHT]])
-// CHECK: %[[NEW_S_PTR:.*]] = cir.cast(bitcast, %[[NEW_S]]
+// CHECK: %[[NEW_S_PTR:.*]] = cir.cast bitcast %[[NEW_S]]
// CHECK: cir.store{{.*}} %[[NEW_S_PTR]], %[[PS_ADDR]]
// CHECK: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK: %[[NEW_INT:.*]] = cir.call @_Znwm(%[[FOUR]])
-// CHECK: %[[NEW_INT_PTR:.*]] = cir.cast(bitcast, %[[NEW_INT]]
+// CHECK: %[[NEW_INT_PTR:.*]] = cir.cast bitcast %[[NEW_INT]]
// CHECK: cir.store{{.*}} %[[NEW_INT_PTR]], %[[PN_ADDR]]
// CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8>
// CHECK: %[[NEW_DOUBLE:.*]] = cir.call @_Znwm(%[[EIGHT]])
-// CHECK: %[[NEW_DOUBLE_PTR:.*]] = cir.cast(bitcast, %[[NEW_DOUBLE]]
+// CHECK: %[[NEW_DOUBLE_PTR:.*]] = cir.cast bitcast %[[NEW_DOUBLE]]
// CHECK: cir.store{{.*}} %[[NEW_DOUBLE_PTR]], %[[PD_ADDR]]
// CHECK: cir.return
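A reconstruction of the body these checks cover, inferred from the alloca names and allocation sizes (the struct layout is assumed):

struct S { int a, b; };    // 8 bytes, matching @_Znwm(8)
void test_basic_new_like() {
  S *ps = new S;           // cir.call @_Znwm, then bitcast !cir.ptr<!void> to the typed pointer
  int *pn = new int;       // @_Znwm(4)
  double *pd = new double; // @_Znwm(8)
}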
@@ -68,13 +68,13 @@ void test_new_with_init() {
// CHECK: %[[PD_ADDR:.*]] = cir.alloca !cir.ptr<!cir.double>, !cir.ptr<!cir.ptr<!cir.double>>, ["pd", init]
// CHECK: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK: %[[NEW_INT:.*]] = cir.call @_Znwm(%[[FOUR]])
-// CHECK: %[[NEW_INT_PTR:.*]] = cir.cast(bitcast, %[[NEW_INT]]
+// CHECK: %[[NEW_INT_PTR:.*]] = cir.cast bitcast %[[NEW_INT]]
// CHECK: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK: cir.store{{.*}} %[[TWO]], %[[NEW_INT_PTR]]
// CHECK: cir.store{{.*}} %[[NEW_INT_PTR]], %[[PN_ADDR]]
// CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8>
// CHECK: %[[NEW_DOUBLE:.*]] = cir.call @_Znwm(%[[EIGHT]])
-// CHECK: %[[NEW_DOUBLE_PTR:.*]] = cir.cast(bitcast, %[[NEW_DOUBLE]]
+// CHECK: %[[NEW_DOUBLE_PTR:.*]] = cir.cast bitcast %[[NEW_DOUBLE]]
// CHECK: %[[THREE:.*]] = cir.const #cir.fp<3.000000e+00>
// CHECK: cir.store{{.*}} %[[THREE]], %[[NEW_DOUBLE_PTR]]
// CHECK: cir.store{{.*}} %[[NEW_DOUBLE_PTR]], %[[PD_ADDR]]
@@ -119,12 +119,12 @@ void test_new_with_ctor() {
// CHECK: %[[PS2_2_ADDR:.*]] = cir.alloca !cir.ptr<!rec_S2>, !cir.ptr<!cir.ptr<!rec_S2>>, ["ps2_2", init]
// CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8>
// CHECK: %[[NEW_S2:.*]] = cir.call @_Znwm(%[[EIGHT]])
-// CHECK: %[[NEW_S2_PTR:.*]] = cir.cast(bitcast, %[[NEW_S2]]
+// CHECK: %[[NEW_S2_PTR:.*]] = cir.cast bitcast %[[NEW_S2]]
// CHECK: cir.call @_ZN2S2C1Ev(%[[NEW_S2_PTR]])
// CHECK: cir.store{{.*}} %[[NEW_S2_PTR]], %[[PS2_ADDR]]
// CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8>
// CHECK: %[[NEW_S2_2:.*]] = cir.call @_Znwm(%[[EIGHT]])
-// CHECK: %[[NEW_S2_2_PTR:.*]] = cir.cast(bitcast, %[[NEW_S2_2]]
+// CHECK: %[[NEW_S2_2_PTR:.*]] = cir.cast bitcast %[[NEW_S2_2]]
// CHECK: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK: cir.call @_ZN2S2C1Eii(%[[NEW_S2_2_PTR]], %[[ONE]], %[[TWO]])
@@ -161,7 +161,7 @@ void test_new_with_complex_type() {
// CHECK: %0 = cir.alloca !cir.ptr<!cir.complex<!cir.float>>, !cir.ptr<!cir.ptr<!cir.complex<!cir.float>>>, ["a", init]
// CHECK: %1 = cir.const #cir.int<8> : !u64i
// CHECK: %2 = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr<!void>
-// CHECK: %3 = cir.cast(bitcast, %2 : !cir.ptr<!void>), !cir.ptr<!cir.complex<!cir.float>>
+// CHECK: %3 = cir.cast bitcast %2 : !cir.ptr<!void> -> !cir.ptr<!cir.complex<!cir.float>>
// CHECK: %4 = cir.const #cir.const_complex<#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float> : !cir.complex<!cir.float>
// CHECK: cir.store align(8) %4, %3 : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
// CHECK: cir.store align(8) %3, %0 : !cir.ptr<!cir.complex<!cir.float>>, !cir.ptr<!cir.ptr<!cir.complex<!cir.float>>>
diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c
index 4be6a94..728c4b8 100644
--- a/clang/test/CIR/CodeGen/no-prototype.c
+++ b/clang/test/CIR/CodeGen/no-prototype.c
@@ -51,7 +51,7 @@ int test3(int x) {
// CHECK: cir.func dso_local @test3
return noProto3(x);
// CHECK: [[GGO:%.*]] = cir.get_global @noProto3 : !cir.ptr<!cir.func<(...) -> !s32i>>
- // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr<!cir.func<(...) -> !s32i>>), !cir.ptr<!cir.func<(!s32i) -> !s32i>>
+ // CHECK: [[CAST:%.*]] = cir.cast bitcast [[GGO]] : !cir.ptr<!cir.func<(...) -> !s32i>> -> !cir.ptr<!cir.func<(!s32i) -> !s32i>>
// CHECK: {{%.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr<!cir.func<(!s32i) -> !s32i>>, !s32i) -> !s32i
}
@@ -68,7 +68,7 @@ int noProto4() { return 0; }
int test4(int x) {
return noProto4(x); // Even if we know the definition, this should compile.
// CHECK: [[GGO:%.*]] = cir.get_global @noProto4 : !cir.ptr<!cir.func<() -> !s32i>>
- // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr<!cir.func<() -> !s32i>>), !cir.ptr<!cir.func<(!s32i) -> !s32i>>
+ // CHECK: [[CAST:%.*]] = cir.cast bitcast [[GGO]] : !cir.ptr<!cir.func<() -> !s32i>> -> !cir.ptr<!cir.func<(!s32i) -> !s32i>>
// CHECK: {{%.*}} = cir.call [[CAST]]({{%.*}}) : (!cir.ptr<!cir.func<(!s32i) -> !s32i>>, !s32i) -> !s32i
}
@@ -77,7 +77,7 @@ int noProto5();
int test5(int x) {
return noProto5();
// CHECK: [[GGO:%.*]] = cir.get_global @noProto5 : !cir.ptr<!cir.func<(!s32i) -> !s32i>>
- // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr<!cir.func<(!s32i) -> !s32i>>), !cir.ptr<!cir.func<() -> !s32i>>
+ // CHECK: [[CAST:%.*]] = cir.cast bitcast [[GGO]] : !cir.ptr<!cir.func<(!s32i) -> !s32i>> -> !cir.ptr<!cir.func<() -> !s32i>>
// CHECK: {{%.*}} = cir.call [[CAST]]() : (!cir.ptr<!cir.func<() -> !s32i>>) -> !s32i
}
int noProto5(int x) { return x; }
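All three cases pivot on C's unprototyped declarations: the CIR type recorded for the global never matches the type at the call site, so the function pointer is bitcast at each call. From the visible source:

int noProto3();                                // C: no prototype, module type (...) -> int
int test3_like(int x) { return noProto3(x); }  // call site casts to (int) -> int
// test4 and test5 are the converse: the module type comes from a definition,
// and the mismatched call site (different arity) receives the cast instead.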
diff --git a/clang/test/CIR/CodeGen/opaque.c b/clang/test/CIR/CodeGen/opaque.c
index 96ecdfc..73f6402 100644
--- a/clang/test/CIR/CodeGen/opaque.c
+++ b/clang/test/CIR/CodeGen/opaque.c
@@ -17,8 +17,8 @@ void foo2() {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_REAL_BOOL:.*]] = cir.cast(float_to_bool, %[[A_REAL]] : !cir.float), !cir.bool
-// CIR: %[[A_IMAG_BOOL:.*]] = cir.cast(float_to_bool, %[[A_IMAG]] : !cir.float), !cir.bool
+// CIR: %[[A_REAL_BOOL:.*]] = cir.cast float_to_bool %[[A_REAL]] : !cir.float -> !cir.bool
+// CIR: %[[A_IMAG_BOOL:.*]] = cir.cast float_to_bool %[[A_IMAG]] : !cir.float -> !cir.bool
// CIR: %[[CONST_TRUE:.*]] = cir.const #true
// CIR: %[[COND:.*]] = cir.select if %[[A_REAL_BOOL]] then %[[CONST_TRUE]] else %[[A_IMAG_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
// CIR: %[[RESULT:.*]] = cir.ternary(%[[COND]], true {
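The cir.select above is the complex-to-bool conversion: a _Complex value is true when either component is non-zero, hence the two float_to_bool casts feeding it. A hedged reconstruction (the trailing cir.ternary suggests a short-circuiting ||, but the right-hand side is outside the hunk):

void foo2_like(void) {
  float _Complex a = 0;
  int x = 0;
  int b = a || x;  // (bool)a == (real != 0 || imag != 0), i.e. the select
  (void)b;
}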
diff --git a/clang/test/CIR/CodeGen/opaque.cpp b/clang/test/CIR/CodeGen/opaque.cpp
index a48c013..028bfd9 100644
--- a/clang/test/CIR/CodeGen/opaque.cpp
+++ b/clang/test/CIR/CodeGen/opaque.cpp
@@ -35,8 +35,8 @@ void foo2() {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_REAL_BOOL:.*]] = cir.cast(float_to_bool, %[[A_REAL]] : !cir.float), !cir.bool
-// CIR: %[[A_IMAG_BOOL:.*]] = cir.cast(float_to_bool, %[[A_IMAG]] : !cir.float), !cir.bool
+// CIR: %[[A_REAL_BOOL:.*]] = cir.cast float_to_bool %[[A_REAL]] : !cir.float -> !cir.bool
+// CIR: %[[A_IMAG_BOOL:.*]] = cir.cast float_to_bool %[[A_IMAG]] : !cir.float -> !cir.bool
// CIR: %[[CONST_TRUE:.*]] = cir.const #true
// CIR: %[[COND:.*]] = cir.select if %[[A_REAL_BOOL]] then %[[CONST_TRUE]] else %[[A_IMAG_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
// CIR: %[[RESULT:.*]] = cir.ternary(%[[COND]], true {
@@ -111,7 +111,7 @@ void foo3() {
// CIR: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b"]
// CIR: %[[C_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["c", init]
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
-// CIR: %[[A_BOOL:.*]] = cir.cast(int_to_bool, %[[TMP_A]] : !s32i), !cir.bool
+// CIR: %[[A_BOOL:.*]] = cir.cast int_to_bool %[[TMP_A]] : !s32i -> !cir.bool
// CIR: %[[RESULT:.*]] = cir.ternary(%[[A_BOOL]], true {
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.yield %[[TMP_A]] : !s32i
diff --git a/clang/test/CIR/CodeGen/pointers.cpp b/clang/test/CIR/CodeGen/pointers.cpp
index dcfcc72..2c3dbb0 100644
--- a/clang/test/CIR/CodeGen/pointers.cpp
+++ b/clang/test/CIR/CodeGen/pointers.cpp
@@ -24,7 +24,7 @@ void foo(int *iptr, char *cptr, unsigned ustride) {
// Must convert unsigned stride to a signed one.
iptr - ustride;
// CHECK: %[[#STRIDE:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
- // CHECK: %[[#SIGNSTRIDE:]] = cir.cast(integral, %[[#STRIDE]] : !u32i), !s32i
+ // CHECK: %[[#SIGNSTRIDE:]] = cir.cast integral %[[#STRIDE]] : !u32i -> !s32i
// CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#SIGNSTRIDE]]) : !s32i, !s32i
// CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr<!s32i>, %[[#NEGSTRIDE]] : !s32i), !cir.ptr<!s32i>
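Pointer arithmetic has no subtract-by-unsigned form; as the visible comment says, the stride is first sign-converted, then negated, and the subtraction becomes a forward stride by a negative offset:

void foo_like(int *iptr, unsigned ustride) {  // names from the test above
  (void)(iptr - ustride);
  // ustride : !u32i --integral cast--> !s32i --unary minus--> negative stride
  // the subtraction is then a cir.ptr_stride(iptr, -stride)
}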
diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp
index 1dc16f3..96db82a 100644
--- a/clang/test/CIR/CodeGen/struct.cpp
+++ b/clang/test/CIR/CodeGen/struct.cpp
@@ -154,3 +154,32 @@ void choose_expr() {
// OGCG: %[[B_ADDR:.*]] = alloca %struct.CompleteS, align 4
// OGCG: %[[C_ADDR:.*]] = alloca %struct.CompleteS, align 4
// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[C_ADDR]], ptr align 4 %[[A_ADDR]], i64 8, i1 false)
+
+void generic_selection() {
+ CompleteS a;
+ CompleteS b;
+ int c;
+ CompleteS d = _Generic(c, int : a, default: b);
+}
+
+// CIR: cir.func{{.*}} @_Z17generic_selectionv()
+// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["c"]
+// CIR: %[[D_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["d", init]
+// TODO(cir): The call to the default copy constructor should be replaced by a `cir.copy` op
+// CIR: cir.call @_ZN9CompleteSC1ERKS_(%[[D_ADDR]], %[[A_ADDR]]) nothrow : (!cir.ptr<!rec_CompleteS>, !cir.ptr<!rec_CompleteS>) -> ()
+
+// LLVM: define{{.*}} void @_Z17generic_selectionv()
+// LLVM: %1 = alloca %struct.CompleteS, i64 1, align 4
+// LLVM: %2 = alloca %struct.CompleteS, i64 1, align 4
+// LLVM: %3 = alloca i32, i64 1, align 4
+// LLVM: %4 = alloca %struct.CompleteS, i64 1, align 4
+// LLVM: call void @_ZN9CompleteSC1ERKS_(ptr %4, ptr %1)
+
+// OGCG: define{{.*}} void @_Z17generic_selectionv()
+// OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca %struct.CompleteS, align 4
+// OGCG: %[[C_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[D_ADDR:.*]] = alloca %struct.CompleteS, align 4
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D_ADDR]], ptr align 4 %[[A_ADDR]], i64 8, i1 false)
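Because the controlling expression `c` has type int, `_Generic` resolves to the `int:` association at compile time; `b` is never referenced in any of the three outputs above, and the declaration collapses to the equivalent of:

CompleteS d = a;  // what _Generic(c, int : a, default: b) selects here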
diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp
index 781286a..eb38ee3 100644
--- a/clang/test/CIR/CodeGen/ternary.cpp
+++ b/clang/test/CIR/CodeGen/ternary.cpp
@@ -69,7 +69,7 @@ int foo(int a, int b) {
// CIR: [[ALOAD2:%.+]] = cir.load align(4) [[A]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.yield [[ALOAD2]] : !s32i
// CIR: }) : (!cir.bool) -> !s32i
-// CIR: [[CAST:%.+]] = cir.cast(int_to_bool, [[TERNARY_RES]] : !s32i), !cir.bool
+// CIR: [[CAST:%.+]] = cir.cast int_to_bool [[TERNARY_RES]] : !s32i -> !cir.bool
// CIR: cir.if [[CAST]] {
// CIR: [[ONE:%.+]] = cir.const #cir.int<1> : !s32i
// CIR: [[MINUS_ONE:%.+]] = cir.unary(minus, [[ONE]]) nsw : !s32i, !s32i
diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp
index c37524b..ac1ae34 100644
--- a/clang/test/CIR/CodeGen/unary.cpp
+++ b/clang/test/CIR/CodeGen/unary.cpp
@@ -410,10 +410,10 @@ void chars(char c) {
// CHECK: cir.func{{.*}} @_Z5charsc
int c1 = +c;
- // CHECK: %[[PROMO:.*]] = cir.cast(integral, %{{.+}} : !s8i), !s32i
+ // CHECK: %[[PROMO:.*]] = cir.cast integral %{{.+}} : !s8i -> !s32i
// CHECK: cir.unary(plus, %[[PROMO]]) : !s32i, !s32i
int c2 = -c;
- // CHECK: %[[PROMO:.*]] = cir.cast(integral, %{{.+}} : !s8i), !s32i
+ // CHECK: %[[PROMO:.*]] = cir.cast integral %{{.+}} : !s8i -> !s32i
// CHECK: cir.unary(minus, %[[PROMO]]) nsw : !s32i, !s32i
// Chars can go through some integer promotion codegen paths even when not promoted.
@@ -431,9 +431,9 @@ _Float16 fp16UPlus(_Float16 f) {
// CHECK: cir.func{{.*}} @_Z9fp16UPlusDF16_({{.*}}) -> !cir.f16
// CHECK: %[[INPUT:.*]] = cir.load{{.*}} %[[F:.*]]
-// CHECK: %[[PROMOTED:.*]] = cir.cast(floating, %[[INPUT]] : !cir.f16), !cir.float
+// CHECK: %[[PROMOTED:.*]] = cir.cast floating %[[INPUT]] : !cir.f16 -> !cir.float
// CHECK: %[[RESULT:.*]] = cir.unary(plus, %[[PROMOTED]])
-// CHECK: %[[UNPROMOTED:.*]] = cir.cast(floating, %[[RESULT]] : !cir.float), !cir.f16
+// CHECK: %[[UNPROMOTED:.*]] = cir.cast floating %[[RESULT]] : !cir.float -> !cir.f16
// LLVM: define{{.*}} half @_Z9fp16UPlusDF16_({{.*}})
// LLVM: %[[F_LOAD:.*]] = load half, ptr %{{.*}}, align 2
@@ -451,9 +451,9 @@ _Float16 fp16UMinus(_Float16 f) {
// CHECK: cir.func{{.*}} @_Z10fp16UMinusDF16_({{.*}}) -> !cir.f16
// CHECK: %[[INPUT:.*]] = cir.load{{.*}} %[[F:.*]]
-// CHECK: %[[PROMOTED:.*]] = cir.cast(floating, %[[INPUT]] : !cir.f16), !cir.float
+// CHECK: %[[PROMOTED:.*]] = cir.cast floating %[[INPUT]] : !cir.f16 -> !cir.float
// CHECK: %[[RESULT:.*]] = cir.unary(minus, %[[PROMOTED]])
-// CHECK: %[[UNPROMOTED:.*]] = cir.cast(floating, %[[RESULT]] : !cir.float), !cir.f16
+// CHECK: %[[UNPROMOTED:.*]] = cir.cast floating %[[RESULT]] : !cir.float -> !cir.f16
// LLVM: define{{.*}} half @_Z10fp16UMinusDF16_({{.*}})
// LLVM: %[[F_LOAD:.*]] = load half, ptr %{{.*}}, align 2
@@ -482,24 +482,24 @@ void test_logical_not() {
// CHECK: cir.func{{.*}} @_Z16test_logical_notv()
// CHECK: %[[A:.*]] = cir.load{{.*}} %[[A_ADDR:.*]] : !cir.ptr<!s32i>, !s32i
-// CHECK: %[[A_BOOL:.*]] = cir.cast(int_to_bool, %[[A]] : !s32i), !cir.bool
+// CHECK: %[[A_BOOL:.*]] = cir.cast int_to_bool %[[A]] : !s32i -> !cir.bool
// CHECK: %[[A_NOT:.*]] = cir.unary(not, %[[A_BOOL]]) : !cir.bool, !cir.bool
-// CHECK: %[[A_CAST:.*]] = cir.cast(bool_to_int, %[[A_NOT]] : !cir.bool), !s32i
+// CHECK: %[[A_CAST:.*]] = cir.cast bool_to_int %[[A_NOT]] : !cir.bool -> !s32i
// CHECK: cir.store{{.*}} %[[A_CAST]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
// CHECK: %[[B:.*]] = cir.load{{.*}} %[[B_ADDR:.*]] : !cir.ptr<!cir.bool>, !cir.bool
// CHECK: %[[B_NOT:.*]] = cir.unary(not, %[[B]]) : !cir.bool, !cir.bool
// CHECK: cir.store{{.*}} %[[B_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr<!cir.bool>
// CHECK: %[[C:.*]] = cir.load{{.*}} %[[C_ADDR:.*]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK: %[[C_BOOL:.*]] = cir.cast(float_to_bool, %[[C]] : !cir.float), !cir.bool
+// CHECK: %[[C_BOOL:.*]] = cir.cast float_to_bool %[[C]] : !cir.float -> !cir.bool
// CHECK: %[[C_NOT:.*]] = cir.unary(not, %[[C_BOOL]]) : !cir.bool, !cir.bool
-// CHECK: %[[C_CAST:.*]] = cir.cast(bool_to_float, %[[C_NOT]] : !cir.bool), !cir.float
+// CHECK: %[[C_CAST:.*]] = cir.cast bool_to_float %[[C_NOT]] : !cir.bool -> !cir.float
// CHECK: cir.store{{.*}} %[[C_CAST]], %[[C_ADDR]] : !cir.float, !cir.ptr<!cir.float>
// CHECK: %[[P:.*]] = cir.load{{.*}} %[[P_ADDR:.*]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
-// CHECK: %[[P_BOOL:.*]] = cir.cast(ptr_to_bool, %[[P]] : !cir.ptr<!s32i>), !cir.bool
+// CHECK: %[[P_BOOL:.*]] = cir.cast ptr_to_bool %[[P]] : !cir.ptr<!s32i> -> !cir.bool
// CHECK: %[[P_NOT:.*]] = cir.unary(not, %[[P_BOOL]]) : !cir.bool, !cir.bool
// CHECK: cir.store{{.*}} %[[P_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr<!cir.bool>
// CHECK: %[[D:.*]] = cir.load{{.*}} %[[D_ADDR:.*]] : !cir.ptr<!cir.double>, !cir.double
-// CHECK: %[[D_BOOL:.*]] = cir.cast(float_to_bool, %[[D]] : !cir.double), !cir.bool
+// CHECK: %[[D_BOOL:.*]] = cir.cast float_to_bool %[[D]] : !cir.double -> !cir.bool
// CHECK: %[[D_NOT:.*]] = cir.unary(not, %[[D_BOOL]]) : !cir.bool, !cir.bool
// CHECK: cir.store{{.*}} %[[D_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr<!cir.bool>
@@ -566,10 +566,10 @@ void f16NestedUPlus() {
// CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
// CHECK: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
// CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
-// CHECK: %[[A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float
+// CHECK: %[[A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float
// CHECK: %[[A_PLUS:.*]] = cir.unary(plus, %[[A_F32]]) : !cir.float, !cir.float
// CHECK: %[[RESULT_F32:.*]] = cir.unary(plus, %[[A_PLUS]]) : !cir.float, !cir.float
-// CHECK: %[[RESULT:.*]] = cir.cast(floating, %[[RESULT_F32]] : !cir.float), !cir.f16
+// CHECK: %[[RESULT:.*]] = cir.cast floating %[[RESULT_F32]] : !cir.float -> !cir.f16
// CHECK: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
// LLVM: define{{.*}} void @_Z14f16NestedUPlusv()
@@ -597,10 +597,10 @@ void f16NestedUMinus() {
// CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
// CHECK: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
// CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
-// CHECK: %[[A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float
+// CHECK: %[[A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float
// CHECK: %[[A_MINUS:.*]] = cir.unary(minus, %[[A_F32]]) : !cir.float, !cir.float
// CHECK: %[[RESULT_F32:.*]] = cir.unary(minus, %[[A_MINUS]]) : !cir.float, !cir.float
-// CHECK: %[[RESULT:.*]] = cir.cast(floating, %[[RESULT_F32]] : !cir.float), !cir.f16
+// CHECK: %[[RESULT:.*]] = cir.cast floating %[[RESULT_F32]] : !cir.float -> !cir.f16
// CHECK: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
// LLVM: define{{.*}} void @_Z15f16NestedUMinusv()
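A reconstruction of the body, assumed from the check sequence (one promotion, two negations, one truncation):

void f16NestedUMinus_like() {
  _Float16 a = 0;
  _Float16 b = -(-a);  // computed in float: promote, negate twice, truncate to half
  (void)b;
}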
diff --git a/clang/test/CIR/CodeGen/union.c b/clang/test/CIR/CodeGen/union.c
index 23e862b..bda8e77 100644
--- a/clang/test/CIR/CodeGen/union.c
+++ b/clang/test/CIR/CodeGen/union.c
@@ -116,7 +116,7 @@ void shouldGenerateUnionAccess(union U2 u) {
// CIR-NEXT: %[[U:.*]] = cir.alloca !rec_U2, !cir.ptr<!rec_U2>, ["u", init] {alignment = 8 : i64}
// CIR-NEXT: cir.store{{.*}} %[[ARG]], %[[U]] : !rec_U2, !cir.ptr<!rec_U2>
// CIR-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CIR-NEXT: %[[ZERO_CHAR:.*]] = cir.cast(integral, %[[ZERO]] : !s32i), !s8i
+// CIR-NEXT: %[[ZERO_CHAR:.*]] = cir.cast integral %[[ZERO]] : !s32i -> !s8i
// CIR-NEXT: %[[B_PTR:.*]] = cir.get_member %[[U]][0] {name = "b"} : !cir.ptr<!rec_U2> -> !cir.ptr<!s8i>
// CIR-NEXT: cir.store{{.*}} %[[ZERO_CHAR]], %[[B_PTR]] : !s8i, !cir.ptr<!s8i>
// CIR-NEXT: %[[B_PTR2:.*]] = cir.get_member %[[U]][0] {name = "b"} : !cir.ptr<!rec_U2> -> !cir.ptr<!s8i>
@@ -174,10 +174,10 @@ void f3(union U3 u) {
// CIR-NEXT: %[[U:.*]] = cir.alloca !rec_U3, !cir.ptr<!rec_U3>, ["u", init] {alignment = 1 : i64}
// CIR-NEXT: cir.store{{.*}} %[[ARG]], %[[U]] : !rec_U3, !cir.ptr<!rec_U3>
// CIR-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
-// CIR-NEXT: %[[ZERO_CHAR:.*]] = cir.cast(integral, %[[ZERO]] : !s32i), !s8i
+// CIR-NEXT: %[[ZERO_CHAR:.*]] = cir.cast integral %[[ZERO]] : !s32i -> !s8i
// CIR-NEXT: %[[IDX:.*]] = cir.const #cir.int<2> : !s32i
// CIR-NEXT: %[[C_PTR:.*]] = cir.get_member %[[U]][0] {name = "c"} : !cir.ptr<!rec_U3> -> !cir.ptr<!cir.array<!s8i x 5>>
-// CIR-NEXT: %[[C_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[C_PTR]] : !cir.ptr<!cir.array<!s8i x 5>>), !cir.ptr<!s8i>
+// CIR-NEXT: %[[C_DECAY:.*]] = cir.cast array_to_ptrdecay %[[C_PTR]] : !cir.ptr<!cir.array<!s8i x 5>> -> !cir.ptr<!s8i>
// CIR-NEXT: %[[ELEM_PTR:.*]] = cir.ptr_stride(%[[C_DECAY]] : !cir.ptr<!s8i>, %[[IDX]] : !s32i), !cir.ptr<!s8i>
// CIR-NEXT: cir.store{{.*}} %[[ZERO_CHAR]], %[[ELEM_PTR]] : !s8i, !cir.ptr<!s8i>
// CIR-NEXT: cir.return
@@ -206,10 +206,10 @@ void f5(union U4 u) {
// CIR-NEXT: %[[U:.*]] = cir.alloca !rec_U4, !cir.ptr<!rec_U4>, ["u", init] {alignment = 4 : i64}
// CIR-NEXT: cir.store{{.*}} %[[ARG]], %[[U]] : !rec_U4, !cir.ptr<!rec_U4>
// CIR-NEXT: %[[CHAR_VAL:.*]] = cir.const #cir.int<65> : !s32i
-// CIR-NEXT: %[[CHAR_CAST:.*]] = cir.cast(integral, %[[CHAR_VAL]] : !s32i), !s8i
+// CIR-NEXT: %[[CHAR_CAST:.*]] = cir.cast integral %[[CHAR_VAL]] : !s32i -> !s8i
// CIR-NEXT: %[[IDX:.*]] = cir.const #cir.int<4> : !s32i
// CIR-NEXT: %[[C_PTR:.*]] = cir.get_member %[[U]][0] {name = "c"} : !cir.ptr<!rec_U4> -> !cir.ptr<!cir.array<!s8i x 5>>
-// CIR-NEXT: %[[C_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[C_PTR]] : !cir.ptr<!cir.array<!s8i x 5>>), !cir.ptr<!s8i>
+// CIR-NEXT: %[[C_DECAY:.*]] = cir.cast array_to_ptrdecay %[[C_PTR]] : !cir.ptr<!cir.array<!s8i x 5>> -> !cir.ptr<!s8i>
// CIR-NEXT: %[[ELEM_PTR:.*]] = cir.ptr_stride(%[[C_DECAY]] : !cir.ptr<!s8i>, %[[IDX]] : !s32i), !cir.ptr<!s8i>
// CIR-NEXT: cir.store{{.*}} %[[CHAR_CAST]], %[[ELEM_PTR]] : !s8i, !cir.ptr<!s8i>
// CIR-NEXT: cir.return
diff --git a/clang/test/CIR/CodeGen/var_arg.c b/clang/test/CIR/CodeGen/var_arg.c
index e9c4acb..f5b92c6 100644
--- a/clang/test/CIR/CodeGen/var_arg.c
+++ b/clang/test/CIR/CodeGen/var_arg.c
@@ -23,13 +23,13 @@ int varargs(int count, ...) {
// CIR: %[[VAAREA:.+]] = cir.alloca !cir.array<!rec___va_list_tag x 1>, !cir.ptr<!cir.array<!rec___va_list_tag x 1>>, ["args"]
// CIR: %[[RES_ADDR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["res", init]
// CIR: cir.store %arg0, %[[COUNT_ADDR]] : !s32i, !cir.ptr<!s32i>
-// CIR: %[[VA_PTR0:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[VA_PTR0:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>> -> !cir.ptr<!rec___va_list_tag>
// CIR: %[[COUNT_VAL:.+]] = cir.load{{.*}} %[[COUNT_ADDR]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.va_start %[[VA_PTR0]] %[[COUNT_VAL]] : !cir.ptr<!rec___va_list_tag>, !s32i
-// CIR: %[[VA_PTR1:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[VA_PTR1:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>> -> !cir.ptr<!rec___va_list_tag>
// CIR: %[[VA_ARG:.+]] = cir.va_arg %[[VA_PTR1]] : (!cir.ptr<!rec___va_list_tag>) -> !s32i
// CIR: cir.store{{.*}} %[[VA_ARG]], %[[RES_ADDR]] : !s32i, !cir.ptr<!s32i>
-// CIR: %[[VA_PTR2:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[VA_PTR2:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>> -> !cir.ptr<!rec___va_list_tag>
// CIR: cir.va_end %[[VA_PTR2]] : !cir.ptr<!rec___va_list_tag>
// CIR: %[[RESULT:.+]] = cir.load{{.*}} %[[RES_ADDR]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store %[[RESULT]], %[[RET_ADDR]] : !s32i, !cir.ptr<!s32i>
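The checks walk the classic va_list protocol; a hedged reconstruction of the body (the actual source sits above the hunk):

#include <stdarg.h>
int varargs_like(int count, ...) {
  va_list args;                  // the !cir.array<!rec___va_list_tag x 1> alloca
  va_start(args, count);         // each builtin use decays the array first, hence
  int res = va_arg(args, int);   //   the repeated cir.cast array_to_ptrdecay before
  va_end(args);                  //   cir.va_start, cir.va_arg, and cir.va_end
  return res;
}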
@@ -99,13 +99,13 @@ int stdarg_start(int count, ...) {
// CIR: %[[VAAREA:.+]] = cir.alloca !cir.array<!rec___va_list_tag x 1>, !cir.ptr<!cir.array<!rec___va_list_tag x 1>>, ["args"]
// CIR: %[[RES_ADDR:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["res", init]
// CIR: cir.store %arg0, %[[COUNT_ADDR]] : !s32i, !cir.ptr<!s32i>
-// CIR: %[[VA_PTR0:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[VA_PTR0:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>> -> !cir.ptr<!rec___va_list_tag>
// CIR: %[[C12345:.+]] = cir.const #cir.int<12345> : !s32i
// CIR: cir.va_start %[[VA_PTR0]] %[[C12345]] : !cir.ptr<!rec___va_list_tag>, !s32i
-// CIR: %[[VA_PTR1:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[VA_PTR1:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>> -> !cir.ptr<!rec___va_list_tag>
// CIR: %[[VA_ARG:.+]] = cir.va_arg %[[VA_PTR1]] : (!cir.ptr<!rec___va_list_tag>) -> !s32i
// CIR: cir.store{{.*}} %[[VA_ARG]], %[[RES_ADDR]] : !s32i, !cir.ptr<!s32i>
-// CIR: %[[VA_PTR2:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>>), !cir.ptr<!rec___va_list_tag>
+// CIR: %[[VA_PTR2:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr<!cir.array<!rec___va_list_tag x 1>> -> !cir.ptr<!rec___va_list_tag>
// CIR: cir.va_end %[[VA_PTR2]] : !cir.ptr<!rec___va_list_tag>
// CIR: %[[RESULT:.+]] = cir.load{{.*}} %[[RES_ADDR]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.store %[[RESULT]], %[[RET_ADDR]] : !s32i, !cir.ptr<!s32i>
diff --git a/clang/test/CIR/CodeGen/variable-decomposition.cpp b/clang/test/CIR/CodeGen/variable-decomposition.cpp
index 40dfe73..ba59109 100644
--- a/clang/test/CIR/CodeGen/variable-decomposition.cpp
+++ b/clang/test/CIR/CodeGen/variable-decomposition.cpp
@@ -27,7 +27,7 @@ float function() {
// CIR: cir.store{{.*}} %[[TWO_FP]], %[[MEMBER_B]]
// CIR: %[[MEMBER_A:.+]] = cir.get_member %[[STRUCT]][0] {name = "a"} : !cir.ptr<!rec_some_struct> -> !cir.ptr<!s32i>
// CIR: %[[LOAD_A:.+]] = cir.load align(4) %[[MEMBER_A]] : !cir.ptr<!s32i>, !s32i
-// CIR: %[[CAST_A:.+]] = cir.cast(int_to_float, %[[LOAD_A]] : !s32i), !cir.float
+// CIR: %[[CAST_A:.+]] = cir.cast int_to_float %[[LOAD_A]] : !s32i -> !cir.float
// CIR: %[[MEMBER_B:.+]] = cir.get_member %[[STRUCT]][1] {name = "b"} : !cir.ptr<!rec_some_struct> -> !cir.ptr<!cir.float>
// CIR: %[[LOAD_B:.+]] = cir.load align(4) %[[MEMBER_B]] : !cir.ptr<!cir.float>, !cir.float
// CIR: %[[ADD:.+]] = cir.binop(add, %[[CAST_A]], %[[LOAD_B]]) : !cir.float
diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp
index 4d57f8e..86469c5 100644
--- a/clang/test/CIR/CodeGen/vbase.cpp
+++ b/clang/test/CIR/CodeGen/vbase.cpp
@@ -62,15 +62,15 @@ void ppp() { B b; }
// CIR: cir.call @_ZN7DerivedC1Ev(%[[D]]) nothrow : (!cir.ptr<!rec_Derived>) -> ()
// CIR: %[[VPTR_PTR:.+]] = cir.vtable.get_vptr %[[D]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!cir.vptr>
// CIR: %[[VPTR:.+]] = cir.load {{.*}} %[[VPTR_PTR]] : !cir.ptr<!cir.vptr>, !cir.vptr
-// CIR: %[[VPTR_I8:.+]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR: %[[VPTR_I8:.+]] = cir.cast bitcast %[[VPTR]] : !cir.vptr -> !cir.ptr<!u8i>
// CIR: %[[NEG32:.+]] = cir.const #cir.int<-32> : !s64i
// CIR: %[[ADJ_VPTR_I8:.+]] = cir.ptr_stride(%[[VPTR_I8]] : !cir.ptr<!u8i>, %[[NEG32]] : !s64i), !cir.ptr<!u8i>
-// CIR: %[[OFFSET_PTR:.+]] = cir.cast(bitcast, %[[ADJ_VPTR_I8]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR: %[[OFFSET_PTR:.+]] = cir.cast bitcast %[[ADJ_VPTR_I8]] : !cir.ptr<!u8i> -> !cir.ptr<!s64i>
// CIR: %[[OFFSET:.+]] = cir.load {{.*}} %[[OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
-// CIR: %[[D_I8:.+]] = cir.cast(bitcast, %[[D]] : !cir.ptr<!rec_Derived>), !cir.ptr<!u8i>
+// CIR: %[[D_I8:.+]] = cir.cast bitcast %[[D]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!u8i>
// CIR: %[[ADJ_THIS_I8:.+]] = cir.ptr_stride(%[[D_I8]] : !cir.ptr<!u8i>, %[[OFFSET]] : !s64i), !cir.ptr<!u8i>
-// CIR: %[[ADJ_THIS_D:.+]] = cir.cast(bitcast, %[[ADJ_THIS_I8]] : !cir.ptr<!u8i>), !cir.ptr<!rec_Derived>
-// CIR: %[[BASE_THIS:.+]] = cir.cast(bitcast, %[[ADJ_THIS_D]] : !cir.ptr<!rec_Derived>), !cir.ptr<!rec_Base>
+// CIR: %[[ADJ_THIS_D:.+]] = cir.cast bitcast %[[ADJ_THIS_I8]] : !cir.ptr<!u8i> -> !cir.ptr<!rec_Derived>
+// CIR: %[[BASE_THIS:.+]] = cir.cast bitcast %[[ADJ_THIS_D]] : !cir.ptr<!rec_Derived> -> !cir.ptr<!rec_Base>
// CIR: %[[BASE_VPTR_PTR:.+]] = cir.vtable.get_vptr %[[BASE_THIS]] : !cir.ptr<!rec_Base> -> !cir.ptr<!cir.vptr>
// CIR: %[[BASE_VPTR:.+]] = cir.load {{.*}} %[[BASE_VPTR_PTR]] : !cir.ptr<!cir.vptr>, !cir.vptr
// CIR: %[[SLOT_PTR:.+]] = cir.vtable.get_virtual_fn_addr %[[BASE_VPTR]][0] : !cir.vptr -> !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!rec_Base>)>>>
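Reading guide for the sequence above: this is the Itanium ABI virtual-base adjustment, not anything CIR-specific. The vtable stores a virtual-base offset at a fixed negative displacement from the address point (-32 here); adding it to `this` locates the Base subobject before the dispatch. The same logic in plain C++ (layout assumptions as stated):

#include <cstdint>
void *adjust_to_base(void *d) {
  char *vptr = *reinterpret_cast<char **>(d);        // load the object's vptr
  std::int64_t off =
      *reinterpret_cast<std::int64_t *>(vptr - 32);  // vbase offset at vptr - 32
  return reinterpret_cast<char *>(d) + off;          // this + offset == Base*
}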
diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp
index 8bca48d..2fd493f 100644
--- a/clang/test/CIR/CodeGen/vector-ext.cpp
+++ b/clang/test/CIR/CodeGen/vector-ext.cpp
@@ -1048,7 +1048,7 @@ void foo17() {
// CIR: %[[VEC_A:.*]] = cir.alloca !cir.vector<2 x !cir.double>, !cir.ptr<!cir.vector<2 x !cir.double>>, ["a"]
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[VEC_A]] : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double>
-// CIR: %[[RES:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.vector<2 x !cir.double>), !cir.vector<2 x !u16i>
+// CIR: %[[RES:.*]] = cir.cast float_to_int %[[TMP]] : !cir.vector<2 x !cir.double> -> !cir.vector<2 x !u16i>
// LLVM: %[[VEC_A:.*]] = alloca <2 x double>, i64 1, align 16
// LLVM: %[[TMP:.*]] = load <2 x double>, ptr %[[VEC_A]], align 16
@@ -1228,11 +1228,11 @@ void foo24() {
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["b"]
// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["c", init]
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.vector<4 x !cir.f16>>, !cir.vector<4 x !cir.f16>
-// CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float>
+// CIR: %[[TMP_A_F16:.*]] = cir.cast floating %[[TMP_A]] : !cir.vector<4 x !cir.f16> -> !cir.vector<4 x !cir.float>
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.vector<4 x !cir.f16>>, !cir.vector<4 x !cir.f16>
-// CIR: %[[TMP_B_F16:.*]] = cir.cast(floating, %[[TMP_B]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float>
+// CIR: %[[TMP_B_F16:.*]] = cir.cast floating %[[TMP_B]] : !cir.vector<4 x !cir.f16> -> !cir.vector<4 x !cir.float>
// CIR: %[[RESULT:.*]] = cir.binop(add, %[[TMP_A_F16]], %[[TMP_B_F16]]) : !cir.vector<4 x !cir.float>
-// CIR: %[[RESULT_VF16:.*]] = cir.cast(floating, %[[RESULT]] : !cir.vector<4 x !cir.float>), !cir.vector<4 x !cir.f16>
+// CIR: %[[RESULT_VF16:.*]] = cir.cast floating %[[RESULT]] : !cir.vector<4 x !cir.float> -> !cir.vector<4 x !cir.f16>
// CIR: cir.store{{.*}} %[[RESULT_VF16]], %[[C_ADDR]] : !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>
// LLVM: %[[A_ADDR:.*]] = alloca <4 x half>, i64 1, align 8
diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp
index f242779..86551d2 100644
--- a/clang/test/CIR/CodeGen/vector.cpp
+++ b/clang/test/CIR/CodeGen/vector.cpp
@@ -1035,7 +1035,7 @@ void foo17() {
// CIR: %[[VEC_A:.*]] = cir.alloca !cir.vector<2 x !cir.double>, !cir.ptr<!cir.vector<2 x !cir.double>>, ["a"]
// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[VEC_A]] : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double>
-// CIR: %[[RES:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.vector<2 x !cir.double>), !cir.vector<2 x !u16i>
+// CIR: %[[RES:.*]] = cir.cast float_to_int %[[TMP]] : !cir.vector<2 x !cir.double> -> !cir.vector<2 x !u16i>
// LLVM: %[[VEC_A:.*]] = alloca <2 x double>, i64 1, align 16
// LLVM: %[[TMP:.*]] = load <2 x double>, ptr %[[VEC_A]], align 16
@@ -1270,11 +1270,11 @@ void foo27() {
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["b"]
// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>, ["c", init]
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.vector<4 x !cir.f16>>, !cir.vector<4 x !cir.f16>
-// CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float>
+// CIR: %[[TMP_A_F16:.*]] = cir.cast floating %[[TMP_A]] : !cir.vector<4 x !cir.f16> -> !cir.vector<4 x !cir.float>
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.vector<4 x !cir.f16>>, !cir.vector<4 x !cir.f16>
-// CIR: %[[TMP_B_F16:.*]] = cir.cast(floating, %[[TMP_B]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float>
+// CIR: %[[TMP_B_F16:.*]] = cir.cast floating %[[TMP_B]] : !cir.vector<4 x !cir.f16> -> !cir.vector<4 x !cir.float>
// CIR: %[[RESULT:.*]] = cir.binop(add, %[[TMP_A_F16]], %[[TMP_B_F16]]) : !cir.vector<4 x !cir.float>
-// CIR: %[[RESULT_VF16:.*]] = cir.cast(floating, %[[RESULT]] : !cir.vector<4 x !cir.float>), !cir.vector<4 x !cir.f16>
+// CIR: %[[RESULT_VF16:.*]] = cir.cast floating %[[RESULT]] : !cir.vector<4 x !cir.float> -> !cir.vector<4 x !cir.f16>
// CIR: cir.store{{.*}} %[[RESULT_VF16]], %[[C_ADDR]] : !cir.vector<4 x !cir.f16>, !cir.ptr<!cir.vector<4 x !cir.f16>>
// LLVM: %[[A_ADDR:.*]] = alloca <4 x half>, i64 1, align 8
diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp
index baab972..f47da41 100644
--- a/clang/test/CIR/CodeGen/vtt.cpp
+++ b/clang/test/CIR/CodeGen/vtt.cpp
@@ -281,23 +281,23 @@ D::D() {}
// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
// CIR-COMMON: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
// CIR-COMMON: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
-// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast bitcast %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[B_VPTR_ADDR]]
// CIR-COMMON: %[[B_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
-// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[B_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.cast bitcast %[[B_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: %[[B_VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
-// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast bitcast %[[VPTR]] : !cir.vptr -> !cir.ptr<!u8i>
// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24>
// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
-// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast bitcast %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i> -> !cir.ptr<!s64i>
// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
-// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_B>), !cir.ptr<!u8i>
+// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast bitcast %[[THIS]] : !cir.ptr<!rec_B> -> !cir.ptr<!u8i>
// CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
-// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_B>
+// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast bitcast %[[BASE_PTR]] : !cir.ptr<!u8i> -> !cir.ptr<!rec_B>
// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
// CIR-COMMON: cir.store{{.*}} %[[B_VPTR]], %[[BASE_VPTR_ADDR]]
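Same mechanics, driven through the VTT: a base-object constructor of a class with virtual bases cannot use the final vtables, so it receives a VTT parameter and installs vptrs from fixed slots in it (offset 0 for its own, offset 1 for the slot destined for the virtual base, which it reaches via the -24 vbase-offset walk above). Conceptually (assumed signature; CIR types the parameter as !cir.ptr<!cir.ptr<!void>>):

void install_vptrs(void **this_vptr_slot, void **vbase_vptr_slot, void **vtt) {
  *this_vptr_slot  = vtt[0];  // cir.vtt.address_point ..., offset = 0
  *vbase_vptr_slot = vtt[1];  // cir.vtt.address_point ..., offset = 1
}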
@@ -347,23 +347,23 @@ D::D() {}
// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
// CIR-COMMON: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
// CIR-COMMON: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
-// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast bitcast %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[C_VPTR_ADDR]]
// CIR-COMMON: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
-// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.cast bitcast %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
-// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast bitcast %[[VPTR]] : !cir.vptr -> !cir.ptr<!u8i>
// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24>
// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
-// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast bitcast %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i> -> !cir.ptr<!s64i>
// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
-// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_C>), !cir.ptr<!u8i>
+// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast bitcast %[[THIS]] : !cir.ptr<!rec_C> -> !cir.ptr<!u8i>
// CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
-// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_C>
+// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast bitcast %[[BASE_PTR]] : !cir.ptr<!u8i> -> !cir.ptr<!rec_C>
// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
// CIR-COMMON: cir.store{{.*}} %[[C_VPTR]], %[[BASE_VPTR_ADDR]]
@@ -419,27 +419,27 @@ D::D() {}
// CIR-COMMON: %[[C_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 3 -> !cir.ptr<!cir.ptr<!void>>
// CIR-COMMON: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!void>>) -> ()
// CIR-COMMON: %[[D_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
-// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast bitcast %[[D_VTT]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
// CIR-COMMON: %[[D_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[D_VPTR_ADDR]]
// CIR-COMMON: %[[D_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 5 -> !cir.ptr<!cir.ptr<!void>>
-// CIR-COMMON: %[[D_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR-COMMON: %[[D_VPTR_ADDR:.*]] = cir.cast bitcast %[[D_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: %[[D_VPTR:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
// CIR-COMMON: %[[D_VPTR_ADDR2:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_D> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: %[[VPTR2:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR2]] : !cir.ptr<!cir.vptr>, !cir.vptr
-// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR2]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast bitcast %[[VPTR2]] : !cir.vptr -> !cir.ptr<!u8i>
// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24> : !s64i
// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
-// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast bitcast %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i> -> !cir.ptr<!s64i>
// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
-// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_D>), !cir.ptr<!u8i>
+// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast bitcast %[[THIS]] : !cir.ptr<!rec_D> -> !cir.ptr<!u8i>
// CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
-// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_D>
+// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast bitcast %[[BASE_PTR]] : !cir.ptr<!u8i> -> !cir.ptr<!rec_D>
// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
// CIR-COMMON: cir.store{{.*}} %[[D_VPTR]], %[[BASE_VPTR_ADDR]]
// CIR-COMMON: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 6 -> !cir.ptr<!cir.ptr<!void>>
-// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.cast bitcast %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
// CIR-COMMON: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-copy.c b/clang/test/CIR/CodeGenOpenACC/combined-copy.c
index b4573e6..c1dc938 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-copy.c
+++ b/clang/test/CIR/CodeGenOpenACC/combined-copy.c
@@ -1090,7 +1090,7 @@ void copy_member_of_array_element_member() {
for(int i = 0; i < 5; ++i);
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i
// CHECK-NEXT: %[[GETINNER:.*]] = cir.get_member %[[OUTER]][0] {name = "inner"} : !cir.ptr<!rec_OuterTy> -> !cir.ptr<!cir.array<!rec_InnerTy x 4>>
- // CHECK-NEXT: %[[INNERDECAY:.*]] = cir.cast(array_to_ptrdecay, %[[GETINNER]] : !cir.ptr<!cir.array<!rec_InnerTy x 4>>), !cir.ptr<!rec_InnerTy>
+ // CHECK-NEXT: %[[INNERDECAY:.*]] = cir.cast array_to_ptrdecay %[[GETINNER]] : !cir.ptr<!cir.array<!rec_InnerTy x 4>> -> !cir.ptr<!rec_InnerTy>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[INNERDECAY]] : !cir.ptr<!rec_InnerTy>, %[[TWO]] : !s32i), !cir.ptr<!rec_InnerTy>
// CHECK-NEXT: %[[GETB:.*]] = cir.get_member %[[STRIDE]][1] {name = "b"} : !cir.ptr<!rec_InnerTy> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[COPYIN1:.*]] = acc.copyin varPtr(%[[GETB]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {dataClause = #acc<data_clause acc_copy>, name = "outer.inner[2].b"}
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
index 57e70df..e836a37a 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
@@ -87,9 +87,9 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!s32i>, %[[ZERO]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr<!s32i>
@@ -97,7 +97,7 @@ struct HasDtor {
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -105,7 +105,7 @@ struct HasDtor {
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -113,7 +113,7 @@ struct HasDtor {
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -121,7 +121,7 @@ struct HasDtor {
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -134,9 +134,9 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr<!cir.float>
@@ -144,7 +144,7 @@ struct HasDtor {
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -152,7 +152,7 @@ struct HasDtor {
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -160,7 +160,7 @@ struct HasDtor {
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -168,7 +168,7 @@ struct HasDtor {
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -181,37 +181,37 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
@@ -224,37 +224,37 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
@@ -267,37 +267,37 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
@@ -310,37 +310,37 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
@@ -349,7 +349,7 @@ struct HasDtor {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr<!rec_HasDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>, ["__array_idx"]
// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
index 63932027..f636a0f 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct NoCopyConstruct {};
@@ -66,7 +66,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -74,7 +73,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -82,7 +80,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -90,7 +87,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -98,7 +94,30 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_NonDefaultCtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -106,7 +125,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -125,7 +143,7 @@ struct HasDtor {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
// CHECK-NEXT: cir.yield
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
index 8cce119..3d295d5 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
@@ -263,7 +263,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -305,7 +305,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -407,7 +407,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -509,7 +509,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -611,7 +611,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -714,7 +714,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -758,7 +758,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -800,7 +800,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -903,7 +903,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
index 2265a9a..be33afe 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
@@ -131,7 +131,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -160,7 +160,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -191,7 +191,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -222,7 +222,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -253,7 +253,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -285,7 +285,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -315,7 +315,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -344,7 +344,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -376,7 +376,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
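The hunks in this file, and in every file below, are a mechanical update of FileCheck expectations for a revised cir.cast assembly format: the old form spelled the cast kind and operand inside parentheses with the result type after a trailing comma, while the new form spells them inline with the result type after an arrow. A minimal before/after sketch, with SSA names chosen for illustration:

  old: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
  new: %1 = cir.cast array_to_ptrdecay %0 : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>

The cast kind, operand, and types are identical in both forms; only the printed syntax changes, which is why every hunk is a one-line substitution.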
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
index a2b9d40..f13d96d 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
@@ -310,7 +310,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -349,7 +349,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -372,7 +372,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -471,7 +471,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -494,7 +494,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -593,7 +593,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -616,7 +616,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -715,7 +715,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -738,7 +738,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -837,7 +837,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -861,7 +861,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -901,7 +901,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -925,7 +925,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -964,7 +964,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -988,7 +988,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -1087,7 +1087,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -1111,7 +1111,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -1151,7 +1151,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
index e7caf83e..952fee9b 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
@@ -134,7 +134,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -163,7 +163,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -194,7 +194,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -225,7 +225,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -256,7 +256,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -288,7 +288,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -318,7 +318,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -347,7 +347,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -379,7 +379,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
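Each acc.reduction.recipe checked in this file follows one shape: the init region allocates a private copy of the array and seeds every element with the operator's identity value, either through an arrayinit.temp loop or through unrolled per-element stores; for the unrolled recipes the identities visible above are 1 for mul and land, -2147483648 for max, 2147483647 for min, and -1 for iand. Reassembling the CHECK lines for the <mul> recipe into one listing in the new syntax gives roughly the following (concrete SSA names are illustrative, and the tail of the region is elided):

  acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
  ^bb0(%arg0: !cir.ptr<!cir.array<!s32i x 5>>):
    %0 = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
    %1 = cir.cast array_to_ptrdecay %0 : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
    %2 = cir.const #cir.int<1> : !s32i
    cir.store %2, %1 : !s32i, !cir.ptr<!s32i>
    ...
  }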
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
index bf9aa0a..15646ed 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
@@ -310,7 +310,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -349,7 +349,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -372,7 +372,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -471,7 +471,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -494,7 +494,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -593,7 +593,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -616,7 +616,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -715,7 +715,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -738,7 +738,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -837,7 +837,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -861,7 +861,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -901,7 +901,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -925,7 +925,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -964,7 +964,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -987,7 +987,7 @@ void acc_combined() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -1086,7 +1086,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -1111,7 +1111,7 @@ void acc_combined() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -1151,7 +1151,7 @@ void acc_combined() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
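For the record element types in this file and the inline-operators file above, the recipes also carry a destroy region: it materializes the index of the last element, decays the array argument, strides to that element, and stores the cursor into an __array_idx slot from which a loop (not shown in these hunks) walks backwards invoking the destructor. The prologue, reassembled in the new syntax with illustrative SSA names:

  %0 = cir.const #cir.int<4> : !u64i
  %1 = cir.cast array_to_ptrdecay %arg : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
  %2 = cir.ptr_stride(%1 : !cir.ptr<!rec_HasOperatorsOutline>, %0 : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
  %3 = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
  cir.store %2, %3 : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>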
diff --git a/clang/test/CIR/CodeGenOpenACC/combined.cpp b/clang/test/CIR/CodeGenOpenACC/combined.cpp
index b814033..98f2ffd 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined.cpp
@@ -191,7 +191,7 @@ extern "C" void acc_combined(int N, int cond) {
#pragma acc serial loop self(N)
for(unsigned I = 0; I < N; ++I);
// CHECK-NEXT: %[[N_LOAD:.*]] = cir.load{{.*}} %[[ALLOCA_N]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[N_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[N_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.serial combined(loop) self(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.loop combined(serial) {
@@ -203,7 +203,7 @@ extern "C" void acc_combined(int N, int cond) {
#pragma acc parallel loop if(N)
for(unsigned I = 0; I < N; ++I);
// CHECK-NEXT: %[[N_LOAD:.*]] = cir.load{{.*}} %[[ALLOCA_N]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[N_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[N_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.parallel combined(loop) if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.loop combined(parallel) {
@@ -215,7 +215,7 @@ extern "C" void acc_combined(int N, int cond) {
#pragma acc serial loop if(1)
for(unsigned I = 0; I < N; ++I);
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.serial combined(loop) if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.loop combined(serial) {
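The same format change covers scalar casts. For the self and if clauses in combined.cpp, the condition is loaded as an integer, converted to !cir.bool with an int_to_bool cast, and bridged to the i1 that the acc dialect expects through builtin.unrealized_conversion_cast. Reassembled from the CHECK lines above, with illustrative SSA names (%n stands for the alloca of N):

  %0 = cir.load %n : !cir.ptr<!s32i>, !s32i
  %1 = cir.cast int_to_bool %0 : !s32i -> !cir.bool
  %2 = builtin.unrealized_conversion_cast %1 : !cir.bool to i1
  acc.serial combined(loop) self(%2) {
    acc.loop combined(serial) {
      ...
    }
  }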
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
index 947b281..de6e7b0 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
@@ -40,9 +40,9 @@ struct NoCopyConstruct {};
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!s32i>, %[[ZERO]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr<!s32i>
@@ -50,7 +50,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -58,7 +58,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -66,7 +66,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -74,7 +74,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -87,9 +87,9 @@ struct NoCopyConstruct {};
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr<!cir.float>
@@ -97,7 +97,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -105,7 +105,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -113,7 +113,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -121,7 +121,7 @@ struct NoCopyConstruct {};
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -134,37 +134,37 @@ struct NoCopyConstruct {};
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr<!rec_NoCopyConstruct>
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr<!rec_NoCopyConstruct>
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr<!rec_NoCopyConstruct>
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr<!rec_NoCopyConstruct>
//
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
index 49fd78c..fca3ca8 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
@@ -87,9 +87,9 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!s32i>, %[[ZERO]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr<!s32i>
@@ -97,7 +97,7 @@ struct HasDtor {
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -105,7 +105,7 @@ struct HasDtor {
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -113,7 +113,7 @@ struct HasDtor {
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -121,7 +121,7 @@ struct HasDtor {
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
@@ -134,9 +134,9 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr<!cir.float>
@@ -144,7 +144,7 @@ struct HasDtor {
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -152,7 +152,7 @@ struct HasDtor {
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -160,7 +160,7 @@ struct HasDtor {
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -168,7 +168,7 @@ struct HasDtor {
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float>
// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
@@ -181,37 +181,37 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
//
@@ -224,37 +224,37 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
//
@@ -267,37 +267,37 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
//
@@ -310,37 +310,37 @@ struct HasDtor {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
//
@@ -349,7 +349,7 @@ struct HasDtor {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr<!rec_HasDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>, ["__array_idx"]
// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c
index 097005e..34b8b69 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c
@@ -27,7 +27,6 @@ struct NoCopyConstruct {};
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -35,7 +34,6 @@ struct NoCopyConstruct {};
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -43,7 +41,6 @@ struct NoCopyConstruct {};
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
index 97399d9..af84684 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct NoCopyConstruct {};
@@ -59,42 +59,60 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_NonDefaultCtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -113,7 +131,7 @@ struct HasDtor {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
// CHECK-NEXT: cir.yield
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
index fff72dc..e357f44 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
@@ -260,7 +260,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -289,7 +289,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -392,7 +392,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -494,7 +494,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -596,7 +596,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -699,7 +699,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -729,7 +729,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -758,7 +758,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -861,7 +861,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
index c5b45f2..e0098bc 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
@@ -263,7 +263,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -305,7 +305,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -407,7 +407,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -509,7 +509,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -611,7 +611,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -714,7 +714,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -758,7 +758,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -800,7 +800,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -903,7 +903,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
index 5b0dcad..5336fad 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
@@ -131,7 +131,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -160,7 +160,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -191,7 +191,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -222,7 +222,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -253,7 +253,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -285,7 +285,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -315,7 +315,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -344,7 +344,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -376,7 +376,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
index 35a79d1..a513882 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
@@ -132,7 +132,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -161,7 +161,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -192,7 +192,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -223,7 +223,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -254,7 +254,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -286,7 +286,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -316,7 +316,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -345,7 +345,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -377,7 +377,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
index 1844440..1968c0a 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
@@ -310,7 +310,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -349,7 +349,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -372,7 +372,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -471,7 +471,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -494,7 +494,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -593,7 +593,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -616,7 +616,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -715,7 +715,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -738,7 +738,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -837,7 +837,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -861,7 +861,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -901,7 +901,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -925,7 +925,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -964,7 +964,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -988,7 +988,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -1087,7 +1087,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -1111,7 +1111,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -1151,7 +1151,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
index 363e885..f63e340 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
@@ -132,7 +132,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -161,7 +161,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -192,7 +192,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -223,7 +223,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -254,7 +254,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -286,7 +286,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -316,7 +316,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -345,7 +345,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -377,7 +377,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
index a4320e6..48e5ac9 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
@@ -134,7 +134,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -163,7 +163,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -194,7 +194,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -225,7 +225,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -256,7 +256,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -288,7 +288,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -318,7 +318,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -347,7 +347,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -379,7 +379,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
index b56c169..6d204bc 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
@@ -310,7 +310,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -349,7 +349,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -372,7 +372,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -471,7 +471,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -494,7 +494,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -593,7 +593,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -616,7 +616,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -715,7 +715,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -738,7 +738,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -837,7 +837,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -861,7 +861,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -901,7 +901,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -925,7 +925,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -964,7 +964,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -987,7 +987,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -1086,7 +1086,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -1111,7 +1111,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -1151,7 +1151,7 @@ void acc_compute() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
index 0e815b7..35a7e7a 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
@@ -132,7 +132,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!u32i>, !cir.ptr<!cir.ptr<!u32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!u32i>, !cir.ptr<!cir.ptr<!u32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!u32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!u32i>
@@ -161,7 +161,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_j : !cir.ptr<!cir.array<!u32i x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !u32i, !cir.ptr<!u32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -192,7 +192,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_j : !cir.ptr<!cir.array<!u32i x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !u32i, !cir.ptr<!u32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -223,7 +223,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_j : !cir.ptr<!cir.array<!u32i x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !u32i, !cir.ptr<!u32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -254,7 +254,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_j : !cir.ptr<!cir.array<!u32i x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !u32i, !cir.ptr<!u32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -286,7 +286,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!u32i>, !cir.ptr<!cir.ptr<!u32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!u32i>, !cir.ptr<!cir.ptr<!u32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!u32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!u32i>
@@ -316,7 +316,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!u32i>, !cir.ptr<!cir.ptr<!u32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!u32i>, !cir.ptr<!cir.ptr<!u32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!u32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!u32i>
@@ -345,7 +345,7 @@ void acc_compute() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_j : !cir.ptr<!cir.array<!u32i x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !u32i, !cir.ptr<!u32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -377,7 +377,7 @@ void acc_compute() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!u32i x 5>, !cir.ptr<!cir.array<!u32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!u32i>, !cir.ptr<!cir.ptr<!u32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>>), !cir.ptr<!u32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!u32i>, !cir.ptr<!cir.ptr<!u32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!u32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!u32i>
diff --git a/clang/test/CIR/CodeGenOpenACC/data.c b/clang/test/CIR/CodeGenOpenACC/data.c
index 1f6a76c..4e13f17f 100644
--- a/clang/test/CIR/CodeGenOpenACC/data.c
+++ b/clang/test/CIR/CodeGenOpenACC/data.c
@@ -87,7 +87,7 @@ void acc_data(int cond) {
#pragma acc data default(none) if(cond)
{}
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.data if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.terminator
@@ -96,7 +96,7 @@ void acc_data(int cond) {
#pragma acc data default(none) if(1)
{}
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.data if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.terminator
diff --git a/clang/test/CIR/CodeGenOpenACC/host_data.c b/clang/test/CIR/CodeGenOpenACC/host_data.c
index fa06d2a..bcfa175 100644
--- a/clang/test/CIR/CodeGenOpenACC/host_data.c
+++ b/clang/test/CIR/CodeGenOpenACC/host_data.c
@@ -38,7 +38,7 @@ void acc_host_data(int cond, int var1, int var2, int *arr) {
// CHECK-NEXT: %[[USE_DEV1:.*]] = acc.use_device varPtr(%[[V1]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "var1"}
// CHECK-NEXT: %[[USE_DEV2:.*]] = acc.use_device varPtr(%[[V2]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "var2"}
// CHECK-NEXT: %[[LOAD_COND:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[COND_BOOL:.*]] = cir.cast(int_to_bool, %[[LOAD_COND]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[COND_BOOL:.*]] = cir.cast int_to_bool %[[LOAD_COND]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[COND_CAST:.*]] = builtin.unrealized_conversion_cast %[[COND_BOOL]] : !cir.bool to i1
// CHECK-NEXT: acc.host_data if(%[[COND_CAST]]) dataOperands(%[[USE_DEV1]], %[[USE_DEV2]] : !cir.ptr<!s32i>, !cir.ptr<!s32i>) {
// CHECK-NEXT: acc.terminator
@@ -49,7 +49,7 @@ void acc_host_data(int cond, int var1, int var2, int *arr) {
// CHECK-NEXT: %[[USE_DEV1:.*]] = acc.use_device varPtr(%[[V1]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "var1"}
// CHECK-NEXT: %[[USE_DEV2:.*]] = acc.use_device varPtr(%[[V2]] : !cir.ptr<!s32i>) -> !cir.ptr<!s32i> {name = "var2"}
// CHECK-NEXT: %[[LOAD_COND:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[COND_BOOL:.*]] = cir.cast(int_to_bool, %[[LOAD_COND]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[COND_BOOL:.*]] = cir.cast int_to_bool %[[LOAD_COND]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[COND_CAST:.*]] = builtin.unrealized_conversion_cast %[[COND_BOOL]] : !cir.bool to i1
// CHECK-NEXT: acc.host_data if(%[[COND_CAST]]) dataOperands(%[[USE_DEV1]], %[[USE_DEV2]] : !cir.ptr<!s32i>, !cir.ptr<!s32i>) {
// CHECK-NEXT: acc.terminator
diff --git a/clang/test/CIR/CodeGenOpenACC/init.c b/clang/test/CIR/CodeGenOpenACC/init.c
index 805fb08..829850f 100644
--- a/clang/test/CIR/CodeGenOpenACC/init.c
+++ b/clang/test/CIR/CodeGenOpenACC/init.c
@@ -18,13 +18,13 @@ void acc_init(int cond) {
#pragma acc init if(cond)
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.init if(%[[BOOL_CONV]])
#pragma acc init if(1)
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
- // CHECK-NEXT: %[[ONE_TO_BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[ONE_TO_BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[ONE_TO_BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.init if(%[[BOOL_CONV]])
@@ -40,7 +40,7 @@ void acc_init(int cond) {
#pragma acc init if(cond) device_num(cond) device_type(*)
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: %[[COND_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_LOAD]] : !s32i to si32
diff --git a/clang/test/CIR/CodeGenOpenACC/kernels.c b/clang/test/CIR/CodeGenOpenACC/kernels.c
index 9b10b74..9f33e54 100644
--- a/clang/test/CIR/CodeGenOpenACC/kernels.c
+++ b/clang/test/CIR/CodeGenOpenACC/kernels.c
@@ -29,7 +29,7 @@ void acc_kernels(int cond) {
// CHECK-NEXT: cir.scope {
// CHECK-NEXT: cir.while {
// CHECK-NEXT: %[[INT:.*]] = cir.const #cir.int<1>
- // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[INT]] :
+ // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[INT]]
// CHECK-NEXT: cir.condition(%[[CAST]])
// CHECK-NEXT: } do {
// CHECK-NEXT: cir.yield
@@ -49,7 +49,7 @@ void acc_kernels(int cond) {
#pragma acc kernels self(cond)
{}
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.kernels self(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.terminator
@@ -58,7 +58,7 @@ void acc_kernels(int cond) {
#pragma acc kernels self(0)
{}
// CHECK-NEXT: %[[ZERO_LITERAL:.*]] = cir.const #cir.int<0> : !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ZERO_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ZERO_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.kernels self(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.terminator
@@ -67,7 +67,7 @@ void acc_kernels(int cond) {
#pragma acc kernels if(cond)
{}
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.kernels if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.terminator
@@ -76,7 +76,7 @@ void acc_kernels(int cond) {
#pragma acc kernels if(1)
{}
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.kernels if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.terminator
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
index d4fd4cc..6824f77 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct NoCopyConstruct {};
@@ -66,7 +66,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -74,7 +73,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -82,7 +80,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -90,7 +87,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -98,7 +94,30 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_NonDefaultCtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
@@ -106,7 +125,6 @@ struct HasDtor {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["openacc.private.init"]
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -125,7 +143,7 @@ struct HasDtor {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
// CHECK-NEXT: cir.yield
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
index 7130a2b..73b8fe2 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
@@ -263,7 +263,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -305,7 +305,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -407,7 +407,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -509,7 +509,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -611,7 +611,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -714,7 +714,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -758,7 +758,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
@@ -800,7 +800,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -903,7 +903,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_DefaultOperators x 5>, !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>), !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_DefaultOperators>, !cir.ptr<!cir.ptr<!rec_DefaultOperators>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_DefaultOperators>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_DefaultOperators>
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
index e549104..77c6138 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
@@ -132,7 +132,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -161,7 +161,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -192,7 +192,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -223,7 +223,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -254,7 +254,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -286,7 +286,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -316,7 +316,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
@@ -345,7 +345,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -377,7 +377,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>>), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!cir.float>, !cir.ptr<!cir.ptr<!cir.float>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.float>, %[[LAST_IDX]] : !s64i), !cir.ptr<!cir.float>
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
index c2ece70..6ca0654 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
@@ -310,7 +310,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -349,7 +349,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -372,7 +372,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -471,7 +471,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -494,7 +494,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -593,7 +593,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -616,7 +616,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -715,7 +715,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -738,7 +738,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -837,7 +837,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -861,7 +861,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -901,7 +901,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -925,7 +925,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -964,7 +964,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -988,7 +988,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -1087,7 +1087,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
@@ -1111,7 +1111,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsInline>
@@ -1151,7 +1151,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>), !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsInline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
index f9169df..dd3c54f 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
@@ -134,7 +134,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -163,7 +163,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -194,7 +194,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -225,7 +225,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -256,7 +256,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -288,7 +288,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -318,7 +318,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
@@ -347,7 +347,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i
@@ -379,7 +379,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!s32i>, %[[LAST_IDX]] : !s64i), !cir.ptr<!s32i>
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
index a3bf173..d36f9c6 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
@@ -310,7 +310,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -349,7 +349,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -372,7 +372,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <mul> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -471,7 +471,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -494,7 +494,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <max> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -593,7 +593,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -616,7 +616,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -715,7 +715,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -738,7 +738,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <iand> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -837,7 +837,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -861,7 +861,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -901,7 +901,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -925,7 +925,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -964,7 +964,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -987,7 +987,7 @@ void acc_loop() {
// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <land> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr<!s32i>
@@ -1086,7 +1086,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
@@ -1111,7 +1111,7 @@ void acc_loop() {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[LAST_IDX]] : !s64i), !cir.ptr<!rec_HasOperatorsOutline>
@@ -1151,7 +1151,7 @@ void acc_loop() {
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>), !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasOperatorsOutline>, %[[SIZE]] : !u64i), !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>
diff --git a/clang/test/CIR/CodeGenOpenACC/parallel.c b/clang/test/CIR/CodeGenOpenACC/parallel.c
index 5db174f..7080a8d 100644
--- a/clang/test/CIR/CodeGenOpenACC/parallel.c
+++ b/clang/test/CIR/CodeGenOpenACC/parallel.c
@@ -28,7 +28,7 @@ void acc_parallel(int cond) {
// CHECK-NEXT: cir.scope {
// CHECK-NEXT: cir.while {
// CHECK-NEXT: %[[INT:.*]] = cir.const #cir.int<1>
- // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[INT]] :
+ // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[INT]]
// CHECK-NEXT: cir.condition(%[[CAST]])
// CHECK-NEXT: } do {
// CHECK-NEXT: cir.yield
@@ -48,7 +48,7 @@ void acc_parallel(int cond) {
#pragma acc parallel self(cond)
{}
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.parallel self(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.yield
@@ -57,7 +57,7 @@ void acc_parallel(int cond) {
#pragma acc parallel self(0)
{}
// CHECK-NEXT: %[[ZERO_LITERAL:.*]] = cir.const #cir.int<0> : !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ZERO_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ZERO_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.parallel self(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.yield
@@ -66,7 +66,7 @@ void acc_parallel(int cond) {
#pragma acc parallel if(cond)
{}
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.parallel if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.yield
@@ -75,7 +75,7 @@ void acc_parallel(int cond) {
#pragma acc parallel if(1)
{}
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.parallel if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.yield
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp
index c62ebe2..101f18e8 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct CtorDtor {
int i;
@@ -14,7 +14,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSA5_8CtorDtor : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CtorDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!rec_CtorDtor x 5>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!rec_CtorDtor x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!rec_CtorDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -34,7 +60,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_CtorDtor>) -> ()
// CHECK-NEXT: cir.yield
@@ -55,7 +81,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CtorDtor x 5>> {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!rec_CtorDtor x 5>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, ["openacc.private.init", init] {alignment = 16 : i64}
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
@@ -75,7 +101,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT:} destroy {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CtorDtor x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!rec_CtorDtor x 5>> {{.*}}):
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
@@ -100,7 +126,57 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_8CtorDtor : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!rec_CtorDtor x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT:} destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -120,7 +196,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: } body {
//
// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
//
// CHECK-NEXT: cir.scope {
@@ -139,7 +215,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_CtorDtor>) -> ()
// CHECK-NEXT: cir.yield
@@ -169,9 +245,9 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_8CtorDtor : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!rec_CtorDtor x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>, ["openacc.private.init", init] {alignment = 16 : i64}
-// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast(bitcast, %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>), !cir.ptr<!cir.array<!rec_CtorDtor x 25>>
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 25>>
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<25> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BITCAST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 25>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 25>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
@@ -190,9 +266,9 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.yield
// CHECK-NEXT:} destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> {{.*}}):
-// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast(bitcast, %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>), !cir.ptr<!cir.array<!rec_CtorDtor x 25>>
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 25>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<24> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BITCAST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 25>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 25>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
@@ -217,7 +293,78 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT:acc.private.recipe @privatization__Bcnt3__ZTSA5_A5_A5_8CtorDtor : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[BOUND1_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[BOUND1_STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT:} destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -236,7 +383,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>>), !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
// CHECK-NEXT: cir.scope {
// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
@@ -253,7 +400,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
// CHECK-NEXT: cir.scope {
// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
@@ -270,7 +417,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[BOUND1_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[BOUND1_STRIDE]]) nothrow : (!cir.ptr<!rec_CtorDtor>) -> ()
// CHECK-NEXT: cir.yield
@@ -310,7 +457,73 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_A5_8CtorDtor : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i
+// CHECK-NEXT: %[[ARR_DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[ARR_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[ARR_DECAY]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[IDX_LOAD]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ONE]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[LAST_ELT]]) : !cir.ptr<!rec_CtorDtor>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT:} destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -330,7 +543,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: } body {
//
// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>>), !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>
//
// CHECK-NEXT: cir.scope {
@@ -349,10 +562,10 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>>), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[ARR_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[ARR_DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[ARR_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
@@ -395,9 +608,9 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_A5_8CtorDtor : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>>, ["openacc.private.init", init] {alignment = 16 : i64}
-// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast(bitcast, %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>>), !cir.ptr<!cir.array<!rec_CtorDtor x 125>>
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 125>>
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<125> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BITCAST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 125>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 125>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
@@ -416,9 +629,9 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.yield
// CHECK-NEXT:} destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> {{.*}})):
-// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast(bitcast, %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>>), !cir.ptr<!cir.array<!rec_CtorDtor x 125>>
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_CtorDtor x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!rec_CtorDtor x 125>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<124> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BITCAST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 125>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 125>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[ARR_IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp
index 38df813..7e2b8b8 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct NoOps { int i = 0; };
@@ -9,7 +9,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSA5_5NoOps : !cir.ptr<!cir.array<!rec_NoOps x 5>> init {
// CHECK-NEXT: ^bb0(%arg0: !cir.ptr<!cir.array<!rec_NoOps x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!rec_NoOps x 5>, !cir.ptr<!cir.array<!rec_NoOps x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -20,7 +46,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: ^bb0(%arg0: !cir.ptr<!cir.array<!rec_NoOps x 5>> {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!rec_NoOps x 5>, !cir.ptr<!cir.array<!rec_NoOps x 5>>, ["openacc.private.init", init] {alignment = 16 : i64}
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_NoOps>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_NoOps>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
@@ -45,7 +71,58 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_5NoOps : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%arg0: !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!rec_NoOps x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> -> !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT:}
;
@@ -57,9 +134,9 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_5NoOps : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%arg0: !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!rec_NoOps x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>, ["openacc.private.init", init] {alignment = 16 : i64}
-// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast(bitcast, %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>), !cir.ptr<!cir.array<!rec_NoOps x 25>>
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> -> !cir.ptr<!cir.array<!rec_NoOps x 25>>
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<25> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BITCAST]] : !cir.ptr<!cir.array<!rec_NoOps x 25>>), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr<!cir.array<!rec_NoOps x 25>> -> !cir.ptr<!rec_NoOps>
// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_NoOps>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_NoOps>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
@@ -84,7 +161,78 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT:acc.private.recipe @privatization__Bcnt3__ZTSA5_A5_A5_5NoOps : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> -> !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[BOUND1_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[BOUND1_STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT:}
;
@@ -98,7 +246,73 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_A5_5NoOps : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> -> !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i
+// CHECK-NEXT: %[[ARR_DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[ARR_DECAY]] : !cir.ptr<!rec_NoOps>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[ARR_DECAY]], %[[ARR_IDX]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ONE]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[LAST_ELT]]) : !cir.ptr<!rec_NoOps>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT:}
;
@@ -110,9 +324,9 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_A5_5NoOps : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>>, ["openacc.private.init", init] {alignment = 16 : i64}
-// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast(bitcast, %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>>), !cir.ptr<!cir.array<!rec_NoOps x 125>>
+// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!rec_NoOps x 125>>
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<125> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BITCAST]] : !cir.ptr<!cir.array<!rec_NoOps x 125>>), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr<!cir.array<!rec_NoOps x 125>> -> !cir.ptr<!rec_NoOps>
// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_NoOps>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_NoOps>
// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-int.cpp
index 3d4aaa0..e83e548 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-int.cpp
@@ -7,7 +7,6 @@ void do_things(unsigned A, unsigned B) {
// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
// CHECK-NEXT: ^bb0(%arg0: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -26,7 +25,6 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_i : !cir.ptr<!cir.array<!cir.array<!s32i x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%arg0: !cir.ptr<!cir.array<!cir.array<!s32i x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!s32i x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!s32i x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT:}
;
@@ -47,7 +45,6 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT:acc.private.recipe @privatization__Bcnt3__ZTSA5_A5_A5_i : !cir.ptr<!cir.array<!cir.array<!cir.array<!s32i x 5> x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!s32i x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!s32i x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!s32i x 5> x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT:}
;
@@ -61,7 +58,6 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_A5_i : !cir.ptr<!cir.array<!cir.array<!cir.array<!s32i x 5> x 5> x 5>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!s32i x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!s32i x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!s32i x 5> x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64}
-// TODO: Add Init here.
// CHECK-NEXT: acc.yield
// CHECK-NEXT:}
;
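For the int recipes above, nothing replaces the deleted TODO lines: trivially-constructible element types need no init loop, so the recipe body stays at the alloca plus acc.yield. For orientation, a sketch of the kind of source that drives a two-bound recipe (the bounds expressions are illustrative; the actual test body may differ):

  // Sketch: a two-bound private of a nested int array; this produces a
  // __Bcnt2 recipe with no element-initialization loop.
  void do_things_sketch(unsigned A, unsigned B) {
    int arr[5][5];
  #pragma acc parallel private(arr[A:B][A:B])
    ;
  }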
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp
index 52bcd7c..3149493 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct CtorDtor {
int i;
@@ -20,7 +20,57 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[ELT_STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.ptr<!rec_CtorDtor>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!rec_CtorDtor>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
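The new checks in this hunk follow a fixed recipe shape: allocate backing storage for the bounded range, point the privatized pointer at it, then run the constructor across lb..ub. As a C++ sketch (the operator-new call stands in for the "openacc.init.bounds" alloca; names are illustrative):

  #include <new>

  struct CtorDtor { int i; CtorDtor() {} ~CtorDtor() {} };

  // Sketch of the one-bound pointer recipe: wire-up loop, then init loop.
  void init_one_bound(CtorDtor *&priv, unsigned long lb, unsigned long ub) {
    // Backing storage for ub elements ("openacc.init.bounds").
    CtorDtor *storage =
        static_cast<CtorDtor *>(::operator new(ub * sizeof(CtorDtor)));
    priv = storage;                  // the first checked loop runs once
    for (unsigned long it = lb; it != ub; ++it)
      new (priv + it) CtorDtor();    // cir.call @_ZN8CtorDtorC1Ev
  }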
@@ -77,13 +127,108 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
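With two bounds the same pattern appears twice: a pointer table for the outer dimension, an element buffer for the inner one, pointer fixups, then a doubly nested construction loop. A compact sketch (allocation strategy and names illustrative, not the test's wording):

  #include <new>

  struct CtorDtor { int i; CtorDtor() {} ~CtorDtor() {} };

  // Sketch of the two-bound CtorDtor** recipe checked above.
  void init_two_bounds(CtorDtor **&priv,
                       unsigned long lb1, unsigned long ub1,
                       unsigned long lb2, unsigned long ub2) {
    CtorDtor **ptrs = new CtorDtor *[ub2];               // outer table
    CtorDtor *elts = static_cast<CtorDtor *>(
        ::operator new(ub1 * ub2 * sizeof(CtorDtor)));   // element buffer
    priv = ptrs;
    for (unsigned long i = 0; i != ub2; ++i)             // pointer fixups
      ptrs[i] = elts + i * ub1;
    for (unsigned long i2 = lb2; i2 != ub2; ++i2)        // "Init Section"
      for (unsigned long i1 = lb1; i1 != ub1; ++i1)
        new (priv[i2] + i1) CtorDtor();                  // ctor per element
  }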
@@ -165,6 +310,30 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
@@ -172,13 +341,131 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_LOAD]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
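The three-bound variant nests the same structure once more; stripped of the per-level scope and bounds plumbing, the init section above amounts to the following (a sketch; names are illustrative and the pointer tables are assumed already wired up as in the two-bound case):

  #include <new>

  struct CtorDtor { int i; CtorDtor() {} ~CtorDtor() {} };

  // Sketch: the triple loop behind the three-bound init section.
  void init_three_bounds(CtorDtor ***priv,
                         unsigned long lb1, unsigned long ub1,
                         unsigned long lb2, unsigned long ub2,
                         unsigned long lb3, unsigned long ub3) {
    for (unsigned long i3 = lb3; i3 != ub3; ++i3)
      for (unsigned long i2 = lb2; i2 != ub2; ++i2)
        for (unsigned long i1 = lb1; i1 != ub1; ++i1)
          new (priv[i3][i2] + i1) CtorDtor();   // @_ZN8CtorDtorC1Ev
  }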
@@ -279,13 +566,59 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -311,13 +644,90 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[ELT_STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -336,7 +746,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>>
// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
// CHECK-NEXT: cir.scope {
// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
@@ -399,7 +809,79 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!rec_CtorDtor x 5>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[ELT_STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -435,7 +917,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_CtorDtor>) -> ()
// CHECK-NEXT: cir.yield
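The destroy regions walk the same bounds but call the destructor; for the pointer-to-array case in this hunk, a sketch (the types follow the block arguments above; names are illustrative):

  struct CtorDtor { int i; CtorDtor() {} ~CtorDtor() {} };

  // Sketch of the destroy traversal: decay each bound-2 row, then
  // destroy elements lb1..ub1-1 in place.
  void destroy_rows(CtorDtor (**priv)[5],
                    unsigned long lb1, unsigned long ub1,
                    unsigned long lb2, unsigned long ub2) {
    for (unsigned long i2 = lb2; i2 != ub2; ++i2) {
      CtorDtor (*row)[5] = *priv + i2;   // ptr_stride over BOUND2
      for (unsigned long i1 = lb1; i1 != ub1; ++i1)
        (*row)[i1].~CtorDtor();          // cir.call @_ZN8CtorDtorD1Ev
    }
  }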
@@ -478,6 +960,10 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
@@ -485,13 +971,130 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -511,7 +1114,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>>), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
// CHECK-NEXT: cir.scope {
// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
@@ -589,13 +1192,39 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_CtorDtor>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -622,6 +1251,31 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
@@ -629,7 +1283,100 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!rec_CtorDtor x 5>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
//
-// TODO: Add Init here.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_LOAD]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -683,7 +1430,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_CtorDtor>) -> ()
// CHECK-NEXT: cir.yield
@@ -729,13 +1476,124 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!rec_CtorDtor x 5>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>>, !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
+// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[DECAY]], %[[IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[IDX_LOAD]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ONE]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.store %[[INC]], %[[IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ELT]]) : !cir.ptr<!rec_CtorDtor>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -777,7 +1635,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[TLA_STRIDE]] : !cir.ptr<!cir.ptr<!cir.array<!rec_CtorDtor x 5>>>, !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_CtorDtor x 5>>
// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>>), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr<!cir.array<!rec_CtorDtor x 5>> -> !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_CtorDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, ["__array_idx"] {alignment = 1 : i64}
// CHECK-NEXT: cir.store %[[ELT]], %[[IDX]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
@@ -838,9 +1696,37 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_CtorDtor> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
+//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
@@ -848,7 +1734,100 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -885,7 +1864,7 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: cir.condition(%[[COND]])
// CHECK-NEXT: } body {
// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>>
// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
// CHECK-NEXT: cir.scope {
// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
@@ -948,7 +1927,30 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_CtorDtor> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp
index 4398216..ed8c380 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct NoOps { int i = 0; };
@@ -15,14 +15,64 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[ELT_STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
#pragma acc parallel private(OnePtr[B])
;
#pragma acc parallel private(OnePtr)
-// CHECK-NEXT: acc.private.recipe @privatization__ZTSP5NoOps : !cir.ptr<!cir.ptr<!rec_NoOps>> init {
+// CHECK: acc.private.recipe @privatization__ZTSP5NoOps : !cir.ptr<!cir.ptr<!rec_NoOps>> init {
// CHECK-NEXT: ^bb0(%arg0: !cir.ptr<!cir.ptr<!rec_NoOps>> {{.*}}):
// CHECK-NEXT: cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, ["openacc.private.init"] {alignment = 8 : i64}
// CHECK-NEXT: acc.yield
@@ -41,13 +91,107 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -75,6 +219,30 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
@@ -82,13 +250,130 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_LOAD]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -109,13 +394,59 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -141,13 +472,89 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> -> !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> -> !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[ELT_STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -176,7 +583,78 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!rec_NoOps x 5>, !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[ELT_STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
#pragma acc parallel private(PtrToArrays[B][A:B])
@@ -200,6 +678,10 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_NoOps>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
@@ -207,13 +689,129 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_NoOps>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -231,13 +829,39 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!rec_NoOps>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -265,13 +889,130 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<20> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!rec_NoOps x 5>, !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_LOAD]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -292,13 +1033,123 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!rec_NoOps x 5>, !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>>, !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!cir.array<!rec_NoOps x 5>>>, !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_NoOps>, %[[ARR_SIZE]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, ["__array_idx"] {alignment = 1 : i64}
+// CHECK-NEXT: cir.store %[[DECAY]], %[[IDX]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.do {
+// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ONE]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.store %[[INC]], %[[IDX]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } while {
+// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ELT]]) : !cir.ptr<!rec_NoOps>, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -325,20 +1176,141 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i
-// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
+// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_NoOps> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> -> !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// Init Section:
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>, !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> -> !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -358,7 +1330,30 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_NoOps> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp
index 79692d3..aac7573 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
template<typename T>
void do_things(unsigned A, unsigned B) {
@@ -13,7 +13,31 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
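+// Copy array pointer to the original alloca.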
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -39,13 +63,59 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
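+// Copy array pointer to the original alloca.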
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
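+// Copy array pointer to the previous level's alloca.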
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -73,6 +143,30 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
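+// Copy array pointer to the original alloca.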
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
@@ -80,13 +174,57 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
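+// Copy array pointer to the previous level's alloca.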
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
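+// Copy array pointer to the previous level's alloca.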
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -107,13 +245,59 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
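+// Copy array pointer to the original alloca.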
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
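+// Copy array pointer to the previous level's alloca.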
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -138,13 +322,39 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>> -> !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
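+// Copy array pointer to the original alloca.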
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -172,7 +382,30 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
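+// Copy array pointer to the original alloca.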
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!s32i x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -197,6 +430,10 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!s32i>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
@@ -204,13 +441,58 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
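+// Copy array pointer to the original alloca.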
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
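+// Copy array pointer to the previous level's alloca.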
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -228,13 +510,39 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!cir.ptr<!s32i>> x 5>> -> !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
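+// Copy array pointer to the original alloca.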
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -262,13 +570,60 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
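+// Copy array pointer to the original alloca.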
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<20> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
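+// Copy array pointer to the previous level's alloca.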
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!s32i x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT:}
;
@@ -289,13 +644,59 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
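+// Copy array pointer to the original alloca.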
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
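+// Copy array pointer to the previous level's alloca.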
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!cir.array<!s32i x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!s32i x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!s32i x 5>>, !cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -325,17 +726,67 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 5>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
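+// Copy array pointer to the original alloca.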
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i
//
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>> -> !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+//
// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
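+// Copy array pointer to the previous level's alloca.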
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -356,7 +807,30 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i
// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i
// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!s32i> x 5>, !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
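+// Copy array pointer to the original alloca.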
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>>
+// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!s32i> x 5>>>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp
index 77ff357..77b7143 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct CtorDtor {
int i;
@@ -29,7 +29,34 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -44,13 +71,64 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
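+// Copy array pointer to the original alloca.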
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -71,6 +149,32 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
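+// Copy array pointer to the original alloca.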
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
@@ -78,13 +182,138 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the alloca one level up.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// Initialization Section.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUNDS3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUNDS3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>> {{.*}}, %[[BOUNDS1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS3:.*]]: !acc.data_bounds_ty {{.*}}):
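The Initialization Section above is what replaces the old "TODO: Add Init here." marker: once the pointer levels are stitched, a three-deep cir.for nest walks BOUNDS3/BOUNDS2/BOUNDS1 and runs the CtorDtor constructor on every innermost element. A hedged C++ sketch of that shape (the bounds arrays and the placement-new spelling are assumptions made for readability; the recipe calls @_ZN8CtorDtorC1Ev directly):

    #include <cstddef>
    #include <new>

    // Sketch: construct every element of the privatized T*** view in place.
    template <typename T>
    void run_init(T ***p, const std::size_t lb[3], const std::size_t ub[3]) {
      for (std::size_t i3 = lb[2]; i3 < ub[2]; ++i3)       // BOUNDS3 loop
        for (std::size_t i2 = lb[1]; i2 < ub[1]; ++i2)     // BOUNDS2 loop
          for (std::size_t i1 = lb[0]; i1 < ub[0]; ++i1)   // BOUNDS1 loop
            ::new (static_cast<void *>(&p[i3][i2][i1])) T(); // one ctor call each
    }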
@@ -193,7 +422,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
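Note the trip count in the copy loop of the hunk above: the cond block compares the iterator against a literal #cir.int<1>, because the "openacc.private.init" destination has exactly one top-level slot. A minimal sketch of that degenerate copy (names assumed):

    #include <cstddef>

    // The top level holds a single pointer, so the loop runs exactly once and
    // stores the base of the row buffer (SRC_IDX = ub * 0 == 0).
    void copy_top_level(void **dest, void **rows, std::size_t ub) {
      for (std::size_t i = 0; i < 1; ++i) // UPPER_LIMIT = cir.const #cir.int<1>
        dest[i] = &rows[ub * i];
    }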
@@ -208,13 +463,116 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the alloca one level up.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Initialization Section.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[TLA_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_STRIDE_LOAD:.*]] = cir.load %[[TLA_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_STRIDE_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!rec_CtorDtor>>> {{.*}}, %[[BOUNDS1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS2:.*]]: !acc.data_bounds_ty {{.*}}):
@@ -299,7 +657,60 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_CTORDTOR:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_CTORDTOR]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Initialization Section.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!rec_CtorDtor>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!rec_CtorDtor>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}):
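For the single-pointer recipe that closes this file, init reduces to: size a flat buffer from the bound, point the private pointer at it, then construct elements lb1..ub1. A sketch under those assumptions (the struct body and the heap allocation are stand-ins; the recipe actually uses a stack alloca sized UPPER_BOUND * sizeof(CtorDtor)):

    #include <cstddef>
    #include <new>

    struct CtorDtor { int i = 0; CtorDtor() {} ~CtorDtor() {} }; // shape assumed

    void init_1d(CtorDtor **priv, std::size_t lb, std::size_t ub) {
      auto *buf =
          static_cast<CtorDtor *>(::operator new(ub * sizeof(CtorDtor)));
      *priv = buf;                          // the one-trip copy loop
      for (std::size_t i = lb; i < ub; ++i) // the Initialization Section
        ::new (static_cast<void *>(&buf[i])) CtorDtor(); // @_ZN8CtorDtorC1Ev
    }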
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp
index 4822dd7..b988fc4 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
struct NoOps { int i = 0; };
@@ -23,7 +23,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -38,13 +64,65 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the alloca one level up.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
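The allocation arithmetic the hunk above checks multiplies the two bounds into NUM_ELTS and then by the pointer size into CALC_ALLOCA_SIZE. A worked instance with illustrative values (the bounds 5 and 4 are assumptions, as is the LP64 pointer size):

    #include <cstddef>

    constexpr std::size_t ub_outer = 5, ub_inner = 4;     // illustrative bounds
    constexpr std::size_t num_elts = ub_inner * ub_outer; // NUM_ELTS = 20
    constexpr std::size_t alloca_bytes = num_elts * 8;    // SIZEOF_PTR = 8 (LP64)
    static_assert(alloca_bytes == 160, "20 pointer slots at 8 bytes each");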
@@ -63,6 +141,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
@@ -70,13 +175,139 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// Copy array pointer to the alloca one level up.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the alloca one level up.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Initialization Section.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
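One difference from the CtorDtor file is visible in the constructor calls above: they carry the nothrow flag. That is consistent with the implicitly-declared NoOps default constructor being non-throwing, which the following one-liner demonstrates (a reading of the output, not a claim about the compiler's internal logic):

    struct NoOps { int i = 0; }; // as declared at the top of this test
    static_assert(noexcept(NoOps()),
                  "the implicit default ctor cannot throw, hence `nothrow` calls");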
@@ -89,7 +320,7 @@ void do_things(unsigned A, unsigned B) {
T **TwoPtr;
#pragma acc parallel private(TwoPtr)
-// CHECK-NEXT: acc.private.recipe @privatization__ZTSPP5NoOps : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> init {
+// CHECK: acc.private.recipe @privatization__ZTSPP5NoOps : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> {{.*}}):
// CHECK-NEXT: cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, ["openacc.private.init"] {alignment = 8 : i64}
// CHECK-NEXT: acc.yield
@@ -106,7 +337,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -121,13 +378,116 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the alloca one level up.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Initialization Section.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[TLA_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+//
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_STRIDE_LOAD:.*]] = cir.load %[[TLA_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_STRIDE_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -155,7 +515,60 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_NOOPS:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_NOOPS]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+// Initialization Section.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
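The int-element file that follows exercises the same recipes with a trivially-constructible element type, so only the stitch/copy loops get added and no Initialization Section is expected. A sketch of the corresponding 1-D shape (names assumed):

    #include <cstddef>

    // With int elements there is no constructor to run, so stitching the
    // levels is all the init region does after the allocas.
    void init_1d_int(int **priv, int *buf, std::size_t /*ub*/) {
      *priv = buf; // copy loop only; no per-element constructor loop follows
    }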
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp
index ddf25de..c87e1a6 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
template<typename T>
void do_things(unsigned A, unsigned B) {
@@ -21,7 +21,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -36,13 +62,65 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -61,6 +139,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
+//
// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
@@ -68,13 +173,64 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i
// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -104,7 +260,34 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -119,6 +302,32 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i
// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64}
//
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!cir.ptr<!cir.ptr<!s32i>>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i
// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i
@@ -126,7 +335,32 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
//
-// TODO: Add Init here.
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
@@ -154,7 +388,33 @@ void do_things(unsigned A, unsigned B) {
// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i
// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT]]) : !u64i
// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64}
-// TODO: Add Init here.
+//
+// Copy array pointer to the original alloca.
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr<!s32i>, %[[SRC_IDX]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!s32i>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
;
diff --git a/clang/test/CIR/CodeGenOpenACC/serial.c b/clang/test/CIR/CodeGenOpenACC/serial.c
index 9e33591..aae4a92 100644
--- a/clang/test/CIR/CodeGenOpenACC/serial.c
+++ b/clang/test/CIR/CodeGenOpenACC/serial.c
@@ -29,7 +29,7 @@ void acc_serial(int cond) {
// CHECK-NEXT: cir.scope {
// CHECK-NEXT: cir.while {
// CHECK-NEXT: %[[INT:.*]] = cir.const #cir.int<1>
- // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[INT]] :
+ // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[INT]]
// CHECK-NEXT: cir.condition(%[[CAST]])
// CHECK-NEXT: } do {
// CHECK-NEXT: cir.yield
@@ -49,7 +49,7 @@ void acc_serial(int cond) {
#pragma acc serial self(cond)
{}
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.serial self(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.yield
@@ -58,7 +58,7 @@ void acc_serial(int cond) {
#pragma acc serial self(0)
{}
// CHECK-NEXT: %[[ZERO_LITERAL:.*]] = cir.const #cir.int<0> : !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ZERO_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ZERO_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.serial self(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.yield
@@ -67,7 +67,7 @@ void acc_serial(int cond) {
#pragma acc serial if(cond)
{}
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.serial if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.yield
@@ -76,7 +76,7 @@ void acc_serial(int cond) {
#pragma acc serial if(1)
{}
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.serial if(%[[CONV_CAST]]) {
// CHECK-NEXT: acc.yield
diff --git a/clang/test/CIR/CodeGenOpenACC/set.c b/clang/test/CIR/CodeGenOpenACC/set.c
index 0b87f42..b8030df 100644
--- a/clang/test/CIR/CodeGenOpenACC/set.c
+++ b/clang/test/CIR/CodeGenOpenACC/set.c
@@ -26,7 +26,7 @@ void acc_set(int cond) {
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: %[[COND_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_LOAD]] : !s32i to si32
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.set device_num(%[[COND_CONV]] : si32) if(%[[BOOL_CONV]])
@@ -36,7 +36,7 @@ void acc_set(int cond) {
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: %[[COND_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_LOAD]] : !s32i to si32
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.set default_async(%[[ONE_CONV]] : si32) device_num(%[[COND_CONV]] : si32) if(%[[BOOL_CONV]]) attributes {device_type = #acc.device_type<radeon>}
diff --git a/clang/test/CIR/CodeGenOpenACC/shutdown.c b/clang/test/CIR/CodeGenOpenACC/shutdown.c
index b68ef90..8c27fa6 100644
--- a/clang/test/CIR/CodeGenOpenACC/shutdown.c
+++ b/clang/test/CIR/CodeGenOpenACC/shutdown.c
@@ -18,13 +18,13 @@ void acc_shutdown(int cond) {
#pragma acc shutdown if(cond)
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.shutdown if(%[[BOOL_CONV]])
#pragma acc shutdown if(1)
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
- // CHECK-NEXT: %[[ONE_TO_BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[ONE_TO_BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[ONE_TO_BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.shutdown if(%[[BOOL_CONV]])
@@ -40,7 +40,7 @@ void acc_shutdown(int cond) {
#pragma acc shutdown if(cond) device_num(cond) device_type(*)
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
// CHECK-NEXT: %[[COND_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_LOAD]] : !s32i to si32
diff --git a/clang/test/CIR/CodeGenOpenACC/wait.c b/clang/test/CIR/CodeGenOpenACC/wait.c
index aeda8b9..8be8665 100644
--- a/clang/test/CIR/CodeGenOpenACC/wait.c
+++ b/clang/test/CIR/CodeGenOpenACC/wait.c
@@ -10,7 +10,7 @@ void acc_wait(int cond) {
#pragma acc wait if (cond)
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: acc.wait if(%[[CONV_CAST]])
@@ -37,7 +37,7 @@ void acc_wait(int cond) {
#pragma acc wait(queues:1) if (cond)
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE_LITERAL]] : !s32i to si32
@@ -54,7 +54,7 @@ void acc_wait(int cond) {
#pragma acc wait(devnum:1: 2, 3) if (cond)
// CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr<!s32i>, !s32i
- // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool
// CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1
// CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i
// CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE_LITERAL]] : !s32i to si32
diff --git a/clang/test/CIR/IR/alloca.cir b/clang/test/CIR/IR/alloca.cir
index 12f7e6a..d94da81 100644
--- a/clang/test/CIR/IR/alloca.cir
+++ b/clang/test/CIR/IR/alloca.cir
@@ -1,5 +1,5 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!u64i = !cir.int<u, 64>
!u8i = !cir.int<u, 8>
@@ -12,7 +12,7 @@ module {
%2 = cir.load align(8) %0 : !cir.ptr<!u64i>, !u64i
// Dynamically sized alloca
%3 = cir.alloca !u8i, !cir.ptr<!u8i>, %2 : !u64i, ["bi_alloca"] {alignment = 16 : i64}
- %4 = cir.cast(bitcast, %3 : !cir.ptr<!u8i>), !cir.ptr<!void>
+ %4 = cir.cast bitcast %3 : !cir.ptr<!u8i> -> !cir.ptr<!void>
cir.store %4, %1 : !cir.ptr<!void>, !cir.ptr<!cir.ptr<!void>>
%5 = cir.load %1 : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
cir.return %5 : !cir.ptr<!void>
@@ -24,7 +24,7 @@ module {
// CHECK: cir.store %arg0, %0 : !u64i, !cir.ptr<!u64i>
// CHECK: %2 = cir.load align(8) %0 : !cir.ptr<!u64i>, !u64i
// CHECK: %3 = cir.alloca !u8i, !cir.ptr<!u8i>, %2 : !u64i, ["bi_alloca"] {alignment = 16 : i64}
- // CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr<!u8i>), !cir.ptr<!void>
+ // CHECK: %4 = cir.cast bitcast %3 : !cir.ptr<!u8i> -> !cir.ptr<!void>
// CHECK: cir.store %4, %1 : !cir.ptr<!void>, !cir.ptr<!cir.ptr<!void>>
// CHECK: %5 = cir.load %1 : !cir.ptr<!cir.ptr<!void>>, !cir.ptr<!void>
// CHECK: cir.return %5 : !cir.ptr<!void>
diff --git a/clang/test/CIR/IR/array-ctor.cir b/clang/test/CIR/IR/array-ctor.cir
index 2378992..fd2ec7e 100644
--- a/clang/test/CIR/IR/array-ctor.cir
+++ b/clang/test/CIR/IR/array-ctor.cir
@@ -1,5 +1,5 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!u8i = !cir.int<u, 8>
!rec_S = !cir.record<struct "S" padded {!u8i}>
diff --git a/clang/test/CIR/IR/array-dtor.cir b/clang/test/CIR/IR/array-dtor.cir
index 6d08d16..1bb9ff9 100644
--- a/clang/test/CIR/IR/array-dtor.cir
+++ b/clang/test/CIR/IR/array-dtor.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!u8i = !cir.int<u, 8>
!rec_S = !cir.record<struct "S" padded {!u8i}>
diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir
index bba5360..ddc6b92 100644
--- a/clang/test/CIR/IR/array.cir
+++ b/clang/test/CIR/IR/array.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
diff --git a/clang/test/CIR/IR/atomic.cir b/clang/test/CIR/IR/atomic.cir
index 6ca5af2..8520763 100644
--- a/clang/test/CIR/IR/atomic.cir
+++ b/clang/test/CIR/IR/atomic.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
!u32i = !cir.int<u, 32>
diff --git a/clang/test/CIR/IR/binassign.cir b/clang/test/CIR/IR/binassign.cir
index a257296..0247126 100644
--- a/clang/test/CIR/IR/binassign.cir
+++ b/clang/test/CIR/IR/binassign.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | cir-opt | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
!s8i = !cir.int<s, 8>
@@ -12,7 +12,7 @@ module {
%4 = cir.const #true
cir.store %4, %0 : !cir.bool, !cir.ptr<!cir.bool>
%5 = cir.const #cir.int<65> : !s32i
- %6 = cir.cast(integral, %5 : !s32i), !s8i
+ %6 = cir.cast integral %5 : !s32i -> !s8i
cir.store %6, %1 : !s8i, !cir.ptr<!s8i>
%7 = cir.const #cir.fp<3.140000e+00> : !cir.float
cir.store %7, %2 : !cir.float, !cir.ptr<!cir.float>
@@ -34,7 +34,7 @@ module {
// CHECK: %4 = cir.const #true
// CHECK: cir.store %4, %0 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK: %5 = cir.const #cir.int<65> : !s32i
-// CHECK: %6 = cir.cast(integral, %5 : !s32i), !s8i
+// CHECK: %6 = cir.cast integral %5 : !s32i -> !s8i
// CHECK: cir.store %6, %1 : !s8i, !cir.ptr<!s8i>
// CHECK: %7 = cir.const #cir.fp<3.140000e+00> : !cir.float
// CHECK: cir.store %7, %2 : !cir.float, !cir.ptr<!cir.float>
diff --git a/clang/test/CIR/IR/bitfield_info.cir b/clang/test/CIR/IR/bitfield_info.cir
index 682e090..2d743fb 100644
--- a/clang/test/CIR/IR/bitfield_info.cir
+++ b/clang/test/CIR/IR/bitfield_info.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
!u32i = !cir.int<u, 32>
diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir
index 9607df7..59f28be 100644
--- a/clang/test/CIR/IR/call.cir
+++ b/clang/test/CIR/IR/call.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir
index a335887..3f2fca9 100644
--- a/clang/test/CIR/IR/cast.cir
+++ b/clang/test/CIR/IR/cast.cir
@@ -1,23 +1,23 @@
-// RUN: cir-opt %s | cir-opt | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
module {
cir.func @yolo(%arg0 : !s32i) {
- %a = cir.cast (int_to_bool, %arg0 : !s32i), !cir.bool
+ %a = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
%0 = cir.const #cir.int<0> : !s32i
cir.return
}
cir.func @bitcast(%p: !cir.ptr<!s32i>) {
- %0 = cir.cast(bitcast, %p : !cir.ptr<!s32i>), !cir.ptr<f32>
+ %0 = cir.cast bitcast %p : !cir.ptr<!s32i> -> !cir.ptr<f32>
cir.return
}
}
// CHECK: cir.func{{.*}} @yolo(%arg0: !s32i)
-// CHECK: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+// CHECK: %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
// CHECK: %1 = cir.const #cir.int<0> : !s32i
// CHECK: cir.func{{.*}} @bitcast
-// CHECK: %0 = cir.cast(bitcast, %arg0 : !cir.ptr<!s32i>), !cir.ptr<f32>
+// CHECK: %0 = cir.cast bitcast %arg0 : !cir.ptr<!s32i> -> !cir.ptr<f32>
diff --git a/clang/test/CIR/IR/cmp.cir b/clang/test/CIR/IR/cmp.cir
index 8185271..0d47398 100644
--- a/clang/test/CIR/IR/cmp.cir
+++ b/clang/test/CIR/IR/cmp.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | cir-opt | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
!u32i = !cir.int<u, 32>
@@ -274,39 +274,39 @@ module {
cir.store %arg0, %0 : !cir.bool, !cir.ptr<!cir.bool>
cir.store %arg1, %1 : !cir.bool, !cir.ptr<!cir.bool>
%3 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- %4 = cir.cast(bool_to_int, %3 : !cir.bool), !s32i
+ %4 = cir.cast bool_to_int %3 : !cir.bool -> !s32i
%5 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- %6 = cir.cast(bool_to_int, %5 : !cir.bool), !s32i
+ %6 = cir.cast bool_to_int %5 : !cir.bool -> !s32i
%7 = cir.cmp(gt, %4, %6) : !s32i, !cir.bool
cir.store %7, %2 : !cir.bool, !cir.ptr<!cir.bool>
%8 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- %9 = cir.cast(bool_to_int, %8 : !cir.bool), !s32i
+ %9 = cir.cast bool_to_int %8 : !cir.bool -> !s32i
%10 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- %11 = cir.cast(bool_to_int, %10 : !cir.bool), !s32i
+ %11 = cir.cast bool_to_int %10 : !cir.bool -> !s32i
%12 = cir.cmp(lt, %9, %11) : !s32i, !cir.bool
cir.store %12, %2 : !cir.bool, !cir.ptr<!cir.bool>
%13 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- %14 = cir.cast(bool_to_int, %13 : !cir.bool), !s32i
+ %14 = cir.cast bool_to_int %13 : !cir.bool -> !s32i
%15 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- %16 = cir.cast(bool_to_int, %15 : !cir.bool), !s32i
+ %16 = cir.cast bool_to_int %15 : !cir.bool -> !s32i
%17 = cir.cmp(ge, %14, %16) : !s32i, !cir.bool
cir.store %17, %2 : !cir.bool, !cir.ptr<!cir.bool>
%18 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- %19 = cir.cast(bool_to_int, %18 : !cir.bool), !s32i
+ %19 = cir.cast bool_to_int %18 : !cir.bool -> !s32i
%20 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- %21 = cir.cast(bool_to_int, %20 : !cir.bool), !s32i
+ %21 = cir.cast bool_to_int %20 : !cir.bool -> !s32i
%22 = cir.cmp(le, %19, %21) : !s32i, !cir.bool
cir.store %22, %2 : !cir.bool, !cir.ptr<!cir.bool>
%23 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- %24 = cir.cast(bool_to_int, %23 : !cir.bool), !s32i
+ %24 = cir.cast bool_to_int %23 : !cir.bool -> !s32i
%25 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- %26 = cir.cast(bool_to_int, %25 : !cir.bool), !s32i
+ %26 = cir.cast bool_to_int %25 : !cir.bool -> !s32i
%27 = cir.cmp(eq, %24, %26) : !s32i, !cir.bool
cir.store %27, %2 : !cir.bool, !cir.ptr<!cir.bool>
%28 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- %29 = cir.cast(bool_to_int, %28 : !cir.bool), !s32i
+ %29 = cir.cast bool_to_int %28 : !cir.bool -> !s32i
%30 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- %31 = cir.cast(bool_to_int, %30 : !cir.bool), !s32i
+ %31 = cir.cast bool_to_int %30 : !cir.bool -> !s32i
%32 = cir.cmp(ne, %29, %31) : !s32i, !cir.bool
cir.store %32, %2 : !cir.bool, !cir.ptr<!cir.bool>
cir.return
@@ -319,39 +319,39 @@ module {
// CHECK-NEXT: cir.store %arg0, %0 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK-NEXT: cir.store %arg1, %1 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK-NEXT: %3 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %4 = cir.cast(bool_to_int, %3 : !cir.bool), !s32i
+ // CHECK-NEXT: %4 = cir.cast bool_to_int %3 : !cir.bool -> !s32i
// CHECK-NEXT: %5 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %6 = cir.cast(bool_to_int, %5 : !cir.bool), !s32i
+ // CHECK-NEXT: %6 = cir.cast bool_to_int %5 : !cir.bool -> !s32i
// CHECK-NEXT: %7 = cir.cmp(gt, %4, %6) : !s32i, !cir.bool
// CHECK-NEXT: cir.store %7, %2 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK-NEXT: %8 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %9 = cir.cast(bool_to_int, %8 : !cir.bool), !s32i
+ // CHECK-NEXT: %9 = cir.cast bool_to_int %8 : !cir.bool -> !s32i
// CHECK-NEXT: %10 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %11 = cir.cast(bool_to_int, %10 : !cir.bool), !s32i
+ // CHECK-NEXT: %11 = cir.cast bool_to_int %10 : !cir.bool -> !s32i
// CHECK-NEXT: %12 = cir.cmp(lt, %9, %11) : !s32i, !cir.bool
// CHECK-NEXT: cir.store %12, %2 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK-NEXT: %13 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %14 = cir.cast(bool_to_int, %13 : !cir.bool), !s32i
+ // CHECK-NEXT: %14 = cir.cast bool_to_int %13 : !cir.bool -> !s32i
// CHECK-NEXT: %15 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %16 = cir.cast(bool_to_int, %15 : !cir.bool), !s32i
+ // CHECK-NEXT: %16 = cir.cast bool_to_int %15 : !cir.bool -> !s32i
// CHECK-NEXT: %17 = cir.cmp(ge, %14, %16) : !s32i, !cir.bool
// CHECK-NEXT: cir.store %17, %2 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK-NEXT: %18 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %19 = cir.cast(bool_to_int, %18 : !cir.bool), !s32i
+ // CHECK-NEXT: %19 = cir.cast bool_to_int %18 : !cir.bool -> !s32i
// CHECK-NEXT: %20 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %21 = cir.cast(bool_to_int, %20 : !cir.bool), !s32i
+ // CHECK-NEXT: %21 = cir.cast bool_to_int %20 : !cir.bool -> !s32i
// CHECK-NEXT: %22 = cir.cmp(le, %19, %21) : !s32i, !cir.bool
// CHECK-NEXT: cir.store %22, %2 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK-NEXT: %23 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %24 = cir.cast(bool_to_int, %23 : !cir.bool), !s32i
+ // CHECK-NEXT: %24 = cir.cast bool_to_int %23 : !cir.bool -> !s32i
// CHECK-NEXT: %25 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %26 = cir.cast(bool_to_int, %25 : !cir.bool), !s32i
+ // CHECK-NEXT: %26 = cir.cast bool_to_int %25 : !cir.bool -> !s32i
// CHECK-NEXT: %27 = cir.cmp(eq, %24, %26) : !s32i, !cir.bool
// CHECK-NEXT: cir.store %27, %2 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK-NEXT: %28 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %29 = cir.cast(bool_to_int, %28 : !cir.bool), !s32i
+ // CHECK-NEXT: %29 = cir.cast bool_to_int %28 : !cir.bool -> !s32i
// CHECK-NEXT: %30 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
- // CHECK-NEXT: %31 = cir.cast(bool_to_int, %30 : !cir.bool), !s32i
+ // CHECK-NEXT: %31 = cir.cast bool_to_int %30 : !cir.bool -> !s32i
// CHECK-NEXT: %32 = cir.cmp(ne, %29, %31) : !s32i, !cir.bool
// CHECK-NEXT: cir.store %32, %2 : !cir.bool, !cir.ptr<!cir.bool>
// CHECK-NEXT: cir.return
diff --git a/clang/test/CIR/IR/complex.cir b/clang/test/CIR/IR/complex.cir
index a73a865..a7e0c77 100644
--- a/clang/test/CIR/IR/complex.cir
+++ b/clang/test/CIR/IR/complex.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
diff --git a/clang/test/CIR/IR/copy.cir b/clang/test/CIR/IR/copy.cir
index 2cfb25d..f9db29a 100644
--- a/clang/test/CIR/IR/copy.cir
+++ b/clang/test/CIR/IR/copy.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
module {
diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir
index 0e9a92f..9532859 100644
--- a/clang/test/CIR/IR/func.cir
+++ b/clang/test/CIR/IR/func.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
!s64i = !cir.int<s, 64>
diff --git a/clang/test/CIR/IR/global-init.cir b/clang/test/CIR/IR/global-init.cir
index 727c067..2fd25df 100644
--- a/clang/test/CIR/IR/global-init.cir
+++ b/clang/test/CIR/IR/global-init.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt --verify-roundtrip %s -o - | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!u8i = !cir.int<u, 8>
diff --git a/clang/test/CIR/IR/global-var-linkage.cir b/clang/test/CIR/IR/global-var-linkage.cir
index e1b7de4..df74e38 100644
--- a/clang/test/CIR/IR/global-var-linkage.cir
+++ b/clang/test/CIR/IR/global-var-linkage.cir
@@ -1,5 +1,4 @@
-// RUN: cir-opt %s -o %t.cir
-// RUN: FileCheck --input-file=%t.cir %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir
index 28fad6b..0464db8 100644
--- a/clang/test/CIR/IR/global.cir
+++ b/clang/test/CIR/IR/global.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s -o - | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s8i = !cir.int<s, 8>
!s16i = !cir.int<s, 16>
diff --git a/clang/test/CIR/IR/label.cir b/clang/test/CIR/IR/label.cir
index 2211a4e..1049766 100644
--- a/clang/test/CIR/IR/label.cir
+++ b/clang/test/CIR/IR/label.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
diff --git a/clang/test/CIR/IR/module.cir b/clang/test/CIR/IR/module.cir
index 7ce2c0b..8c782fd 100644
--- a/clang/test/CIR/IR/module.cir
+++ b/clang/test/CIR/IR/module.cir
@@ -1,5 +1,4 @@
-// RUN: cir-opt %s -split-input-file -o %t.cir
-// RUN: FileCheck --input-file=%t.cir %s
+// RUN: cir-opt %s -split-input-file --verify-roundtrip | FileCheck %s
// Should parse and print the C source language attribute.
module attributes {cir.lang = #cir.lang<c>} { }
diff --git a/clang/test/CIR/IR/stack-save-restore.cir b/clang/test/CIR/IR/stack-save-restore.cir
index f98889ac..476f212 100644
--- a/clang/test/CIR/IR/stack-save-restore.cir
+++ b/clang/test/CIR/IR/stack-save-restore.cir
@@ -1,6 +1,6 @@
// Test that the CIR operations parse and print correctly (roundtrip)
-// RUN: cir-opt %s | cir-opt | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!u8i = !cir.int<u, 8>
diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir
index 33f2e98..2e011fb 100644
--- a/clang/test/CIR/IR/struct.cir
+++ b/clang/test/CIR/IR/struct.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!u8i = !cir.int<u, 8>
!u16i = !cir.int<u, 16>
diff --git a/clang/test/CIR/IR/switch-flat.cir b/clang/test/CIR/IR/switch-flat.cir
index 8c11a74..d39c3e7 100644
--- a/clang/test/CIR/IR/switch-flat.cir
+++ b/clang/test/CIR/IR/switch-flat.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
cir.func @FlatSwitchWithoutDefault(%arg0: !s32i) {
diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir
index 0bdc9c1..87d45bf 100644
--- a/clang/test/CIR/IR/switch.cir
+++ b/clang/test/CIR/IR/switch.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
cir.func @s0() {
diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir
index e419c7f..78e1de4 100644
--- a/clang/test/CIR/IR/ternary.cir
+++ b/clang/test/CIR/IR/ternary.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | cir-opt | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!u32i = !cir.int<u, 32>
module {
diff --git a/clang/test/CIR/IR/throw.cir b/clang/test/CIR/IR/throw.cir
index 8b24b48..e7a1bf4 100644
--- a/clang/test/CIR/IR/throw.cir
+++ b/clang/test/CIR/IR/throw.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
diff --git a/clang/test/CIR/IR/unary.cir b/clang/test/CIR/IR/unary.cir
index ba3bc20d..d01d4eb 100644
--- a/clang/test/CIR/IR/unary.cir
+++ b/clang/test/CIR/IR/unary.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
!s64i = !cir.int<s, 64>
diff --git a/clang/test/CIR/IR/vector.cir b/clang/test/CIR/IR/vector.cir
index 6d8e5be..d274c35 100644
--- a/clang/test/CIR/IR/vector.cir
+++ b/clang/test/CIR/IR/vector.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!s32i = !cir.int<s, 32>
diff --git a/clang/test/CIR/IR/vtable-addrpt.cir b/clang/test/CIR/IR/vtable-addrpt.cir
index 0b809cc..7c8fa8d 100644
--- a/clang/test/CIR/IR/vtable-addrpt.cir
+++ b/clang/test/CIR/IR/vtable-addrpt.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
// Test the parsing and printing of a constructor that uses a vtable address_point op.
@@ -14,7 +14,7 @@ module {
cir.store %arg0, %0 : !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>
%1 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
%2 = cir.vtable.address_point(@_ZTV1S, address_point = <index = 0, offset = 2>) : !cir.vptr
- %3 = cir.cast(bitcast, %1 : !cir.ptr<!rec_S>), !cir.ptr<!cir.vptr>
+ %3 = cir.cast bitcast %1 : !cir.ptr<!rec_S> -> !cir.ptr<!cir.vptr>
cir.store align(8) %2, %3 : !cir.vptr, !cir.ptr<!cir.vptr>
cir.return
}
diff --git a/clang/test/CIR/IR/vtable-attr.cir b/clang/test/CIR/IR/vtable-attr.cir
index 3854208..70e3296 100644
--- a/clang/test/CIR/IR/vtable-attr.cir
+++ b/clang/test/CIR/IR/vtable-attr.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
!rec_Q = !cir.record<struct "Q" {!cir.vptr}>
!rec_S = !cir.record<struct "S" {!cir.vptr}>
diff --git a/clang/test/CIR/IR/vtt-addrpoint.cir b/clang/test/CIR/IR/vtt-addrpoint.cir
index f05bb78..823ddd2 100644
--- a/clang/test/CIR/IR/vtt-addrpoint.cir
+++ b/clang/test/CIR/IR/vtt-addrpoint.cir
@@ -1,4 +1,4 @@
-// RUN: cir-opt %s | FileCheck %s
+// RUN: cir-opt %s --verify-roundtrip | FileCheck %s
// Test the parsing and printing of the two forms of vtt.address_point op, as
// they will appear in constructors.
@@ -26,7 +26,7 @@ module {
cir.call @_ZN1BC2Ev(%4, %5) : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
%6 = cir.vtt.address_point %3 : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
- %7 = cir.cast(bitcast, %6 : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+ %7 = cir.cast bitcast %6 : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
%8 = cir.load align(8) %7 : !cir.ptr<!cir.vptr>, !cir.vptr
%9 = cir.vtable.get_vptr %2 : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
cir.store align(8) %8, %9 : !cir.vptr, !cir.ptr<!cir.vptr>
diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir
index 6842905..ec104ed 100644
--- a/clang/test/CIR/Lowering/cast.cir
+++ b/clang/test/CIR/Lowering/cast.cir
@@ -26,51 +26,51 @@ module {
// Integer casts.
%9 = cir.load %0 : !cir.ptr<!u32i>, !u32i
- %10 = cir.cast(integral, %9 : !u32i), !s8i
+ %10 = cir.cast integral %9 : !u32i -> !s8i
// CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8
cir.store %10, %3 : !s8i, !cir.ptr<!s8i>
%11 = cir.load %1 : !cir.ptr<!s32i>, !s32i
- %12 = cir.cast(integral, %11 : !s32i), !s16i
+ %12 = cir.cast integral %11 : !s32i -> !s16i
// CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16
cir.store %12, %4 : !s16i, !cir.ptr<!s16i>
%13 = cir.load %0 : !cir.ptr<!u32i>, !u32i
- %14 = cir.cast(integral, %13 : !u32i), !s64i
+ %14 = cir.cast integral %13 : !u32i -> !s64i
// CHECK: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64
cir.store %14, %5 : !s64i, !cir.ptr<!s64i>
%15 = cir.load %1 : !cir.ptr<!s32i>, !s32i
- %16 = cir.cast(integral, %15 : !s32i), !s64i
+ %16 = cir.cast integral %15 : !s32i -> !s64i
// CHECK: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64
- %30 = cir.cast(integral, %arg1 : !s32i), !u32i
+ %30 = cir.cast integral %arg1 : !s32i -> !u32i
// Should not produce a cast.
- %32 = cir.cast(integral, %arg0 : !u32i), !s32i
+ %32 = cir.cast integral %arg0 : !u32i -> !s32i
// Should not produce a cast.
%21 = cir.load %20 : !cir.ptr<!s16i>, !s16i
- %22 = cir.cast(integral, %21 : !s16i), !u64i
+ %22 = cir.cast integral %21 : !s16i -> !u64i
// CHECK: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64
- %33 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool
+ %33 = cir.cast int_to_bool %arg1 : !s32i -> !cir.bool
// CHECK: %[[#ZERO:]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[#CMP:]] = llvm.icmp "ne" %arg1, %[[#ZERO]] : i32
// Pointer casts.
cir.store %16, %6 : !s64i, !cir.ptr<!s64i>
- %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr<!u8i>
+ %23 = cir.cast int_to_ptr %22 : !u64i -> !cir.ptr<!u8i>
// CHECK: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr
- %24 = cir.cast(ptr_to_int, %23 : !cir.ptr<!u8i>), !s32i
+ %24 = cir.cast ptr_to_int %23 : !cir.ptr<!u8i> -> !s32i
// CHECK: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32
- %29 = cir.cast(ptr_to_bool, %23 : !cir.ptr<!u8i>), !cir.bool
+ %29 = cir.cast ptr_to_bool %23 : !cir.ptr<!u8i> -> !cir.bool
// Floating point casts.
- %25 = cir.cast(int_to_float, %arg1 : !s32i), !cir.float
+ %25 = cir.cast int_to_float %arg1 : !s32i -> !cir.float
// CHECK: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32
- %26 = cir.cast(int_to_float, %arg0 : !u32i), !cir.float
+ %26 = cir.cast int_to_float %arg0 : !u32i -> !cir.float
// CHECK: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32
- %27 = cir.cast(float_to_int, %arg2 : !cir.float), !s32i
+ %27 = cir.cast float_to_int %arg2 : !cir.float -> !s32i
// CHECK: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32
- %28 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i
+ %28 = cir.cast float_to_int %arg2 : !cir.float -> !u32i
// CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32
%18 = cir.const #cir.int<0> : !s32i
// CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32
- %34 = cir.cast(floating, %arg3 : !cir.double), !cir.float
+ %34 = cir.cast floating %arg3 : !cir.double -> !cir.float
cir.store %18, %2 : !s32i, !cir.ptr<!s32i>
%19 = cir.load %2 : !cir.ptr<!s32i>, !s32i
@@ -84,7 +84,7 @@ module {
cir.store %arg0, %0 : !cir.bool, !cir.ptr<!cir.bool>
%2 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
- %3 = cir.cast(bool_to_int, %2 : !cir.bool), !u8i
+ %3 = cir.cast bool_to_int %2 : !cir.bool -> !u8i
// CHECK: %[[LOAD_BOOL:.*]] = llvm.load %{{.*}} : !llvm.ptr -> i8
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[LOAD_BOOL]] : i8 to i1
// CHECK: %[[EXT:.*]] = llvm.zext %[[TRUNC]] : i1 to i8
diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir
index 3a077aa..888fb38 100644
--- a/clang/test/CIR/Lowering/if.cir
+++ b/clang/test/CIR/Lowering/if.cir
@@ -4,7 +4,7 @@
module {
cir.func @foo(%arg0: !s32i) -> !s32i {
- %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+ %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
cir.if %4 {
%5 = cir.const #cir.int<1> : !s32i
cir.return %5 : !s32i
@@ -44,7 +44,7 @@ module {
// LLVM-NEXT: }
cir.func @onlyIf(%arg0: !s32i) -> !s32i {
- %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+ %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
cir.if %4 {
%5 = cir.const #cir.int<1> : !s32i
cir.return %5 : !s32i
@@ -66,7 +66,7 @@ module {
// Verify empty if clause is properly lowered to empty block
cir.func @emptyIfClause(%arg0: !s32i) -> !s32i {
// MLIR-LABEL: llvm.func @emptyIfClause
- %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+ %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
// MLIR: llvm.cond_br {{%.*}}, ^[[T:.*]], ^[[PHI:.*]]
cir.if %4 {
// MLIR-NEXT: ^[[T]]:
@@ -82,7 +82,7 @@ module {
// addressed
cir.func @emptyIfElseClause(%arg0: !s32i) -> !s32i {
// MLIR-LABEL: llvm.func @emptyIfElseClause
- %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+ %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
// MLIR: llvm.cond_br {{%.*}}, ^[[T:.*]], ^[[F:.*]]
cir.if %4 {
// MLIR-NEXT: ^[[T]]:
diff --git a/clang/test/CIR/Lowering/vtt-addrpoint.cir b/clang/test/CIR/Lowering/vtt-addrpoint.cir
index 96dc27d..e1bfd00 100644
--- a/clang/test/CIR/Lowering/vtt-addrpoint.cir
+++ b/clang/test/CIR/Lowering/vtt-addrpoint.cir
@@ -24,7 +24,7 @@ module {
%5 = cir.vtt.address_point %3 : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
cir.call @_ZN1BC2Ev(%4, %5) : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
%6 = cir.vtt.address_point %3 : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
- %7 = cir.cast(bitcast, %6 : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+ %7 = cir.cast bitcast %6 : !cir.ptr<!cir.ptr<!void>> -> !cir.ptr<!cir.vptr>
%8 = cir.load align(8) %7 : !cir.ptr<!cir.vptr>, !cir.vptr
%9 = cir.vtable.get_vptr %2 : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
cir.store align(8) %8, %9 : !cir.vptr, !cir.ptr<!cir.vptr>
diff --git a/clang/test/CIR/Transforms/canonicalize.cir b/clang/test/CIR/Transforms/canonicalize.cir
index 5daff11..5606f9e 100644
--- a/clang/test/CIR/Transforms/canonicalize.cir
+++ b/clang/test/CIR/Transforms/canonicalize.cir
@@ -50,39 +50,39 @@ module {
// CHECK-NEXT: }
cir.func @cast1(%arg0: !cir.bool) -> !cir.bool {
- %0 = cir.cast(bool_to_int, %arg0 : !cir.bool), !s32i
- %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool
+ %0 = cir.cast bool_to_int %arg0 : !cir.bool -> !s32i
+ %1 = cir.cast int_to_bool %0 : !s32i -> !cir.bool
cir.return %1 : !cir.bool
}
// CHECK: cir.func{{.*}} @cast1(%[[ARG0:.*]]: !cir.bool) -> !cir.bool
// CHECK-NEXT: cir.return %[[ARG0]] : !cir.bool
cir.func @cast2(%arg0: !s32i) -> !cir.bool {
- %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
- %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i
- %2 = cir.cast(integral, %1 : !s32i), !s64i
- %3 = cir.cast(int_to_bool, %2 : !s64i), !cir.bool
+ %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
+ %1 = cir.cast bool_to_int %0 : !cir.bool -> !s32i
+ %2 = cir.cast integral %1 : !s32i -> !s64i
+ %3 = cir.cast int_to_bool %2 : !s64i -> !cir.bool
cir.return %3 : !cir.bool
}
// CHECK: cir.func{{.*}} @cast2(%[[ARG0:.*]]: !s32i) -> !cir.bool
- // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[ARG0]] : !s32i), !cir.bool
+ // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[ARG0]] : !s32i -> !cir.bool
// CHECK-NEXT: cir.return %[[CAST]] : !cir.bool
cir.func @no_fold_cast(%arg0: !s32i) -> !s64i {
- %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
- %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i
- %2 = cir.cast(integral, %1 : !s32i), !s64i
+ %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
+ %1 = cir.cast bool_to_int %0 : !cir.bool -> !s32i
+ %2 = cir.cast integral %1 : !s32i -> !s64i
cir.return %2 : !s64i
}
// CHECK: cir.func{{.*}} @no_fold_cast(%[[ARG0:.*]]: !s32i) -> !s64i
- // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[ARG0]] : !s32i), !cir.bool
- // CHECK-NEXT: %[[CAST2:.*]] = cir.cast(bool_to_int, %[[CAST]] : !cir.bool), !s32i
- // CHECK-NEXT: %[[CAST3:.*]] = cir.cast(integral, %[[CAST2]] : !s32i), !s64i
+ // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[ARG0]] : !s32i -> !cir.bool
+ // CHECK-NEXT: %[[CAST2:.*]] = cir.cast bool_to_int %[[CAST]] : !cir.bool -> !s32i
+ // CHECK-NEXT: %[[CAST3:.*]] = cir.cast integral %[[CAST2]] : !s32i -> !s64i
// CHECK-NEXT: cir.return %[[CAST3]] : !s64i
cir.func @cast_poison() -> !s64i {
%0 = cir.const #cir.poison : !s32i
- %1 = cir.cast(integral, %0 : !s32i), !s64i
+ %1 = cir.cast integral %0 : !s32i -> !s64i
cir.return %1 : !s64i
}
// CHECK: @cast_poison
diff --git a/clang/test/CIR/Transforms/if.cir b/clang/test/CIR/Transforms/if.cir
index 3f817c7..ced288f7 100644
--- a/clang/test/CIR/Transforms/if.cir
+++ b/clang/test/CIR/Transforms/if.cir
@@ -4,7 +4,7 @@
module {
cir.func @foo(%arg0: !s32i) -> !s32i {
- %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+ %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
cir.if %4 {
%5 = cir.const #cir.int<1> : !s32i
cir.return %5 : !s32i
@@ -15,7 +15,7 @@ module {
cir.return %arg0 : !s32i
}
// CHECK: cir.func{{.*}} @foo(%arg0: !s32i) -> !s32i {
-// CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+// CHECK-NEXT: %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2
// CHECK-NEXT: ^bb1: // pred: ^bb0
// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i
@@ -28,7 +28,7 @@ module {
// CHECK-NEXT: }
cir.func @onlyIf(%arg0: !s32i) -> !s32i {
- %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+ %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
cir.if %4 {
%5 = cir.const #cir.int<1> : !s32i
cir.return %5 : !s32i
@@ -36,7 +36,7 @@ module {
cir.return %arg0 : !s32i
}
// CHECK: cir.func{{.*}} @onlyIf(%arg0: !s32i) -> !s32i {
-// CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool
+// CHECK-NEXT: %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2
// CHECK-NEXT: ^bb1: // pred: ^bb0
// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i
diff --git a/clang/test/CIR/Transforms/switch.cir b/clang/test/CIR/Transforms/switch.cir
index a000d6b..3addfe3 100644
--- a/clang/test/CIR/Transforms/switch.cir
+++ b/clang/test/CIR/Transforms/switch.cir
@@ -261,8 +261,8 @@ module {
// CHECK-NEXT: %[[RANGE:[0-9]+]] = cir.const #cir.int<99>
// CHECK-NEXT: %[[LOWER_BOUND:[0-9]+]] = cir.const #cir.int<1>
// CHECK-NEXT: %[[DIFF:[0-9]+]] = cir.binop(sub, %[[X]], %[[LOWER_BOUND]])
-// CHECK-NEXT: %[[U_DIFF:[0-9]+]] = cir.cast(integral, %[[DIFF]] : !s32i), !u32i
-// CHECK-NEXT: %[[U_RANGE:[0-9]+]] = cir.cast(integral, %[[RANGE]] : !s32i), !u32i
+// CHECK-NEXT: %[[U_DIFF:[0-9]+]] = cir.cast integral %[[DIFF]] : !s32i -> !u32i
+// CHECK-NEXT: %[[U_RANGE:[0-9]+]] = cir.cast integral %[[RANGE]] : !s32i -> !u32i
// CHECK-NEXT: %[[CMP_RESULT:[0-9]+]] = cir.cmp(le, %[[U_DIFF]], %[[U_RANGE]])
// CHECK-NEXT: cir.brcond %[[CMP_RESULT]] ^[[CASE_RANGE]], ^[[CASE_DEFAULT:bb[0-9]+]]
// CHECK-NEXT: ^[[CASE_DEFAULT]]:
@@ -304,8 +304,8 @@ module {
// CHECK: %[[CONST97:.*]] = cir.const #cir.int<97> : !s32i
// CHECK: %[[CONST3:.*]] = cir.const #cir.int<3> : !s32i
// CHECK: %[[SUB:.*]] = cir.binop(sub, %[[COND]], %[[CONST3]]) : !s32i
-// CHECK: %[[CAST1:.*]] = cir.cast(integral, %[[SUB]] : !s32i), !u32i
-// CHECK: %[[CAST2:.*]] = cir.cast(integral, %[[CONST97]] : !s32i), !u32i
+// CHECK: %[[CAST1:.*]] = cir.cast integral %[[SUB]] : !s32i -> !u32i
+// CHECK: %[[CAST2:.*]] = cir.cast integral %[[CONST97]] : !s32i -> !u32i
// CHECK: %[[CMP:.*]] = cir.cmp(le, %[[CAST1]], %[[CAST2]]) : !u32i, !cir.bool
// CHECK: cir.brcond %7 ^bb[[#DEFAULT_BB]], ^bb[[#RANGE_BB:]]
// CHECK: ^bb[[#RANGE_BB]]: // pred: ^bb[[#RANGE_BR]]
diff --git a/clang/test/CXX/drs/cwg25xx.cpp b/clang/test/CXX/drs/cwg25xx.cpp
index 5c2948f..0e0fc73 100644
--- a/clang/test/CXX/drs/cwg25xx.cpp
+++ b/clang/test/CXX/drs/cwg25xx.cpp
@@ -243,19 +243,20 @@ namespace cwg2565 { // cwg2565: 16 open 2023-06-07
// since-cxx20-note@#cwg2565-VC {{because 'b' would be invalid: argument may not have 'void' type}}
template<typename T>
- concept ErrorRequires = requires (ErrorRequires auto x) {
+ concept ErrorRequires = requires (ErrorRequires auto x) { // #cwg2565-expr
// since-cxx20-error@-1 {{a concept definition cannot refer to itself}}
// since-cxx20-note@-2 {{declared here}}
// since-cxx20-error@-3 {{'auto' not allowed in requires expression parameter}}
x;
};
static_assert(ErrorRequires<int>);
- // since-cxx20-error@-1 {{static assertion failed}}
- // since-cxx20-note@-2 {{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // since-cxx20-error@-1 {{static assertion failed}} \
+ // since-cxx20-note@-1 {{because 'int' does not satisfy 'ErrorRequires'}} \
+ // since-cxx20-note@#cwg2565-expr {{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
template<typename T>
concept NestedErrorInRequires = requires (T x) { // #cwg2565-NEIR
- requires requires (NestedErrorInRequires auto y) {
+ requires requires (NestedErrorInRequires auto y) { // #cwg2565-NEIR-inner
// since-cxx20-error@-1 {{a concept definition cannot refer to itself}}
// since-cxx20-note@#cwg2565-NEIR {{declared here}}
// since-cxx20-error@-3 {{'auto' not allowed in requires expression parameter}}
@@ -263,8 +264,9 @@ namespace cwg2565 { // cwg2565: 16 open 2023-06-07
};
};
static_assert(NestedErrorInRequires<int>);
- // since-cxx20-error@-1 {{static assertion failed}}
- // since-cxx20-note@-2 {{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // since-cxx20-error@-1 {{static assertion failed}} \
+ // since-cxx20-note@-1 {{because 'int' does not satisfy 'NestedErrorInRequires'}} \
+ // since-cxx20-note-re@#cwg2565-NEIR-inner {{because {{.*}} would be invalid: constraint depends on a previously diagnosed expression}}
#endif
} // namespace cwg2565
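
The reworked expectations capture two diagnostic changes for failed concept satisfaction: the static_assert failure now carries an explicit "because 'int' does not satisfy 'ErrorRequires'" note, and the "substituted constraint expression is ill-formed" note is attached to the requires-expression itself through the #cwg2565-expr and #cwg2565-NEIR-inner markers rather than to the static_assert line. A minimal -verify-style sketch of the same shape (concept name hypothetical):

template <typename T>
concept SelfRef = requires(SelfRef auto x) { x; }; // ill-formed: a concept
                                                   // cannot refer to itself
static_assert(SelfRef<int>); // error: static assertion failed
// note: because 'int' does not satisfy 'SelfRef'
// note (at the requires-expression above): substituted constraint
// expression is ill-formed
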
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.id/p3.cpp b/clang/test/CXX/expr/expr.prim/expr.prim.id/p3.cpp
index 28b5d0a..af2fc93 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.id/p3.cpp
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.id/p3.cpp
@@ -140,7 +140,8 @@ concept C7 = sizeof(T) == 1 || sizeof(
::type) == 1;
static_assert(!C6<short>);
-static_assert(!C6<char>); // expected-note{{while checking the satisfaction of concept 'C6<char>' requested here}}
+static_assert(!C6<char>);
+// expected-note@-1 {{while checking the satisfaction of concept 'C6<char>' requested here}}
static_assert(C7<char>);
static_assert(!C7<short>); // expected-note{{while checking the satisfaction of concept 'C7<short>' requested here}}
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp b/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
index 31587a9..af2dce8 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.req/compound-requirement.cpp
@@ -35,14 +35,14 @@ using r2i2 = r2<A>; // expected-error{{constraints not satisfied for class templ
using r2i3 = r2<D>;
using r2i4 = r2<const D>; // expected-error{{constraints not satisfied for class template 'r2' [with T = const D]}}
-template<typename T> requires requires { { sizeof(T) }; } // expected-note{{because 'sizeof(T)' would be invalid: invalid application of 'sizeof' to an incomplete type 'void'}} expected-note{{because 'sizeof(T)' would be invalid: invalid application of 'sizeof' to an incomplete type 'nonexistent'}}
+template<typename T> requires requires { { sizeof(T) }; } // expected-note{{because 'sizeof(T)' would be invalid: invalid application of 'sizeof' to an incomplete type 'void'}} expected-note{{because 'sizeof(T)' would be invalid: invalid application of 'sizeof' to an incomplete type 'class nonexistent'}}
struct r3 {};
using r3i1 = r3<int>;
using r3i2 = r3<A>;
using r3i3 = r3<A &>;
using r3i4 = r3<void>; // expected-error{{constraints not satisfied for class template 'r3' [with T = void]}}
-using r3i4 = r3<class nonexistent>; // expected-error{{constraints not satisfied for class template 'r3' [with T = nonexistent]}}
+using r3i4 = r3<class nonexistent>; // expected-error{{constraints not satisfied for class template 'r3' [with T = class nonexistent]}}
// Non-dependent expressions
@@ -89,7 +89,7 @@ template<typename T>
concept Large = sizeof(typename remove_reference<T>::type) >= 4;
// expected-note@-1{{because 'sizeof(typename remove_reference<short &>::type) >= 4' (2 >= 4) evaluated to false}}
-template<typename T> requires requires (T t) { { t } -> Large; } // expected-note{{because 'short &' does not satisfy 'Large':}}
+template<typename T> requires requires (T t) { { t } -> Large; } // expected-note{{because 'short &' does not satisfy 'Large'}}
struct r7 {};
using r7i1 = r7<int>;
@@ -149,7 +149,7 @@ namespace std_example {
template<typename T> constexpr bool is_same_v<T, T> = true;
template<typename T, typename U> concept same_as = is_same_v<T, U>;
- // expected-note@-1 {{because 'is_same_v<int, int *>' evaluated to false}}
+ // expected-note@-1 {{because 'is_same_v<int, typename std_example::T2::inner>' evaluated to false}}
static_assert(C1<int>);
static_assert(C1<int*>);
@@ -160,7 +160,7 @@ namespace std_example {
template<typename T> concept C2 =
requires(T x) {
{*x} -> same_as<typename T::inner>;
- // expected-note@-1{{because type constraint 'same_as<int, typename std_example::T2::inner>' was not satisfied:}}
+ // expected-note@-1{{because 'same_as<int, typename std_example::T2::inner>' evaluated to false}}
// expected-note@-2{{because '*x' would be invalid: indirection requires pointer operand ('int' invalid)}}
};
@@ -173,9 +173,9 @@ namespace std_example {
int operator *() { return 0; }
};
static_assert(C2<T1>);
- template<C2 T> struct C2_check {}; // expected-note{{because 'int' does not satisfy 'C2'}} expected-note{{because 'std_example::T2' does not satisfy 'C2'}}
+ template<C2 T> struct C2_check {}; // expected-note{{because 'int' does not satisfy 'C2'}} expected-note{{because 'T2' does not satisfy 'C2'}}
using c2c1 = C2_check<int>; // expected-error{{constraints not satisfied for class template 'C2_check' [with T = int]}}
- using c2c2 = C2_check<T2>; // expected-error{{constraints not satisfied for class template 'C2_check' [with T = std_example::T2]}}
+ using c2c2 = C2_check<T2>; // expected-error{{constraints not satisfied for class template 'C2_check' [with T = T2]}}
template<typename T>
void g(T t) noexcept(sizeof(T) == 1) {}
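
A pattern recurs through this file: diagnostics now print template arguments as written ('class nonexistent', 'T2', 'typename std_example::T2::inner') instead of the canonical, namespace-qualified spelling, and a failed type-constraint is reported uniformly as "'same_as<...>' evaluated to false" rather than "type constraint 'same_as<...>' was not satisfied". A small sketch of the naming change (names hypothetical):

namespace ns {
struct T2 {};                                      // sizeof(T2) == 1
template <typename T> concept C2 = sizeof(T) > 1;  // fails for T2
template <C2 T> struct C2_check {};
using bad = C2_check<T2>; // error now reads [with T = T2], not [with T = ns::T2]
}
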
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.req/nested-requirement.cpp b/clang/test/CXX/expr/expr.prim/expr.prim.req/nested-requirement.cpp
index 033ae34..70a96be 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.req/nested-requirement.cpp
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.req/nested-requirement.cpp
@@ -43,11 +43,10 @@ namespace std_example {
requires sizeof(a) == 4; // OK
requires a == 0; // expected-error{{substitution into constraint expression resulted in a non-constant expression}}
// expected-note@-1{{while checking the satisfaction of nested requirement requested here}}
- // expected-note@-2{{in instantiation of requirement here}}
- // expected-note@-3{{while checking the satisfaction of nested requirement requested here}}
- // expected-note@-6{{while substituting template arguments into constraint expression here}}
- // expected-note@-5{{function parameter 'a' with unknown value cannot be used in a constant expression}}
- // expected-note@-8{{declared here}}
+ // expected-note@-2{{while checking the satisfaction of nested requirement requested here}}
+ // expected-note@-5{{while substituting template arguments into constraint expression here}}
+ // expected-note@-4{{function parameter 'a' with unknown value cannot be used in a constant expression}}
+ // expected-note@-7{{declared here}}
};
static_assert(C2<int>); // expected-error{{static assertion failed}}
// expected-note@-1{{while checking the satisfaction of concept 'C2<int>' requested here}}
@@ -84,31 +83,26 @@ static_assert(Pipes<S>);
static_assert(Pipes<double>);
static_assert(Amps1<S>);
-static_assert(!Amps1<double>);
+static_assert(Amps1<double>);
static_assert(Amps2<S>);
-static_assert(!Amps2<double>);
+static_assert(Amps2<double>);
template<class T>
-void foo1() requires requires (T x) { // #foo1
+void foo1() requires requires (T x) {
requires
- True<decltype(x.value)> // #foo1Value
+ True<decltype(x.value)>
&& True<T>;
} {}
template<class T> void fooPipes() requires Pipes<T> {}
-template<class T> void fooAmps1() requires Amps1<T> {} // #fooAmps1
+template<class T> void fooAmps1() requires Amps1<T> {}
void foo() {
foo1<S>();
- foo1<int>(); // expected-error {{no matching function for call to 'foo1'}}
- // expected-note@#foo1Value {{because 'True<decltype(x.value)> && True<T>' would be invalid: member reference base type 'int' is not a structure or union}}
- // expected-note@#foo1 {{candidate template ignored: constraints not satisfied [with T = int]}}
+ foo1<int>();
fooPipes<S>();
fooPipes<int>();
fooAmps1<S>();
- fooAmps1<int>(); // expected-error {{no matching function for call to 'fooAmps1'}}
- // expected-note@#fooAmps1 {{candidate template ignored: constraints not satisfied [with T = int]}}
- // expected-note@#fooAmps1 {{because 'int' does not satisfy 'Amps1'}}
- // expected-note@#Amps1 {{because 'True<decltype(x.value)> && True<T> && !False<T>' would be invalid: member reference base type 'int' is not a structure or union}}
+ fooAmps1<int>();
}
template<class T>
@@ -158,15 +152,16 @@ void func() {
// expected-note@#bar {{while substituting template arguments into constraint expression here}}
// expected-note@#bar {{while checking the satisfaction of nested requirement requested here}}
// expected-note@#bar {{candidate template ignored: constraints not satisfied [with T = False]}}
- // expected-note@#bar {{because 'X<SubstitutionFailureNestedRequires::ErrorExpressions_NotSF::False>::value' evaluated to false}}
+ // expected-note@#bar {{because 'X<False>::value' evaluated to false}}
bar<int>();
+ // expected-error@-1 {{no matching function for call to 'bar'}} \
// expected-note@-1 {{while checking constraint satisfaction for template 'bar<int>' required here}} \
- // expected-note@-1 {{while substituting deduced template arguments into function template 'bar' [with T = int]}}
+ // expected-note@-1 {{while substituting deduced template arguments into function template 'bar' [with T = int]}} \
// expected-note@#bar {{in instantiation of static data member}}
- // expected-note@#bar {{in instantiation of requirement here}}
// expected-note@#bar {{while checking the satisfaction of nested requirement requested here}}
// expected-note@#bar {{while substituting template arguments into constraint expression here}}
+ // expected-note@#bar {{candidate template ignored}}
// expected-error@#X_Value {{type 'int' cannot be used prior to '::' because it has no members}}
}
}
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.req/simple-requirement.cpp b/clang/test/CXX/expr/expr.prim/expr.prim.req/simple-requirement.cpp
index 5199708..5dcb188 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.req/simple-requirement.cpp
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.req/simple-requirement.cpp
@@ -39,14 +39,14 @@ using r2i4 = r2<const D>; // expected-error{{constraints not satisfied for class
template<typename T> requires requires { sizeof(T); }
// expected-note@-1{{because 'sizeof(T)' would be invalid: invalid application of 'sizeof' to an incomplete type 'void'}}
-// expected-note@-2{{because 'sizeof(T)' would be invalid: invalid application of 'sizeof' to an incomplete type 'nonexistent'}}
+// expected-note@-2{{because 'sizeof(T)' would be invalid: invalid application of 'sizeof' to an incomplete type 'class nonexistent'}}
struct r3 {};
using r3i1 = r3<int>;
using r3i2 = r3<A>;
using r3i3 = r3<A &>;
using r3i4 = r3<void>; // expected-error{{constraints not satisfied for class template 'r3' [with T = void]}}
-using r3i4 = r3<class nonexistent>; // expected-error{{constraints not satisfied for class template 'r3' [with T = nonexistent]}}
+using r3i4 = r3<class nonexistent>; // expected-error{{constraints not satisfied for class template 'r3' [with T = class nonexistent]}}
template<typename T> requires requires (T t) { 0; "a"; (void)'a'; }
struct r4 {};
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.req/type-requirement.cpp b/clang/test/CXX/expr/expr.prim/expr.prim.req/type-requirement.cpp
index 5433cfb..28dff33 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.req/type-requirement.cpp
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.req/type-requirement.cpp
@@ -182,14 +182,14 @@ namespace std_example {
static_assert(C1<has_inner_and_type> && C2<has_inner_and_type> && C3<has_inner_and_type>);
template<C1 T> struct C1_check {};
// expected-note@-1 {{because 'int' does not satisfy 'C1'}}
- // expected-note@-2 {{because 'std_example::has_type' does not satisfy 'C1'}}
+ // expected-note@-2 {{because 'has_type' does not satisfy 'C1'}}
template<C2 T> struct C2_check {};
- // expected-note@-1 {{because 'std_example::has_inner' does not satisfy 'C2'}}
+ // expected-note@-1 {{because 'has_inner' does not satisfy 'C2'}}
template<C3 T> struct C3_check {};
// expected-note@-1 {{because 'void' does not satisfy 'C3'}}
using c1 = C1_check<int>; // expected-error{{constraints not satisfied for class template 'C1_check' [with T = int]}}
- using c2 = C1_check<has_type>; // expected-error{{constraints not satisfied for class template 'C1_check' [with T = std_example::has_type]}}
- using c3 = C2_check<has_inner>; // expected-error{{constraints not satisfied for class template 'C2_check' [with T = std_example::has_inner]}}
+ using c2 = C1_check<has_type>; // expected-error{{constraints not satisfied for class template 'C1_check' [with T = has_type]}}
+ using c3 = C2_check<has_inner>; // expected-error{{constraints not satisfied for class template 'C2_check' [with T = has_inner]}}
using c4 = C3_check<void>; // expected-error{{constraints not satisfied for class template 'C3_check' [with T = void]}}
}
@@ -199,10 +199,10 @@ template <typename T> concept C = requires { requires requires { T::a; }; };
// expected-note@-1 {{because 'T::a' would be invalid: no member named 'a' in 'PR48656::T1'}}
template <C...> struct A {};
-// expected-note@-1 {{because 'PR48656::T1' does not satisfy 'C'}}
+// expected-note@-1 {{because 'T1' does not satisfy 'C'}}
struct T1 {};
-template struct A<T1>; // expected-error {{constraints not satisfied for class template 'A' [with $0 = <PR48656::T1>]}}
+template struct A<T1>; // expected-error {{constraints not satisfied for class template 'A' [with $0 = <T1>]}}
struct T2 { static constexpr bool a = false; };
template struct A<T2>;
diff --git a/clang/test/CXX/temp/temp.constr/temp.constr.atomic/constrant-satisfaction-conversions.cpp b/clang/test/CXX/temp/temp.constr/temp.constr.atomic/constrant-satisfaction-conversions.cpp
index 59e6a48..6dea0c6 100644
--- a/clang/test/CXX/temp/temp.constr/temp.constr.atomic/constrant-satisfaction-conversions.cpp
+++ b/clang/test/CXX/temp/temp.constr/temp.constr.atomic/constrant-satisfaction-conversions.cpp
@@ -28,9 +28,8 @@ template<typename T> requires requires {
requires S<T>{};
// expected-error@-1{{atomic constraint must be of type 'bool' (found 'S<int>')}}
// expected-note@-2{{while checking the satisfaction}}
- // expected-note@-3{{in instantiation of requirement}}
- // expected-note@-4{{while checking the satisfaction}}
- // expected-note@-6{{while substituting template arguments}}
+ // expected-note@-3{{while checking the satisfaction of nested requirement}}
+ // expected-note@-5{{while substituting template arguments}}
// expected-note@#F3INST{{while checking constraint satisfaction}}
// expected-note@#F3INST{{while substituting deduced template arguments into function template 'f3' [with T = int]}}
//
diff --git a/clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp b/clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp
index 3992835..34c5c5d 100644
--- a/clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp
+++ b/clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp
@@ -1,21 +1,31 @@
// RUN: %clang_cc1 -std=c++2a -x c++ -verify %s
+// RUN: %clang_cc1 -std=c++2c -x c++ -verify %s
template<typename T> concept True = true;
-template<typename T> concept Foo = True<T*>;
-template<typename T> concept Bar = Foo<T&>;
-template<typename T> requires Bar<T> struct S { };
-template<typename T> requires Bar<T> && true struct S<T> { };
+template<typename T> concept Foo = True<T*>; // #Foo
+template<typename T> concept Bar = Foo<T&>; // #Bar
+template<typename T> requires Bar<T> struct S { }; // #S
+template<typename T> requires Bar<T> && true struct S<T> { }; // #SpecS
+// expected-error@-1 {{class template partial specialization is not more specialized than the primary template}}
+// expected-error@#Foo 2{{'type name' declared as a pointer to a reference of type 'T &'}}
+// expected-note@#SpecS {{while substituting into concept arguments here}}
+// expected-note@#S {{while substituting into concept arguments here}}
+// expected-note@#Bar 2{{while substituting into concept arguments here}}
+// expected-note@#S {{template is declared here}}
+
+
template<typename T> concept True2 = sizeof(T) >= 0;
-template<typename T> concept Foo2 = True2<T*>;
-// expected-error@-1{{'type name' declared as a pointer to a reference of type 'type-parameter-0-0 &'}}
-template<typename T> concept Bar2 = Foo2<T&>;
-// expected-note@-1{{while substituting into concept arguments here; substitution failures not allowed in concept arguments}}
-template<typename T> requires Bar2<T> struct S2 { };
+template<typename T> concept Foo2 = True2<T*>; // #Foo2
+
+template<typename T> concept Bar2 = Foo2<T&>; // #Bar2
+// expected-note@-1 3{{while substituting into concept arguments here; substitution failures not allowed in concept arguments}}
+template<typename T> requires Bar2<T> struct S2 { }; // #SpecS2_1
// expected-note@-1{{template is declared here}}
-template<typename T> requires Bar2<T> && true struct S2<T> { };
+template<typename T> requires Bar2<T> && true struct S2<T> { }; // #SpecS2_2
// expected-error@-1{{class template partial specialization is not more specialized than the primary template}}
-// expected-note@-2{{while calculating associated constraint of template 'S2<T>' here}}
+// expected-error@#Foo2{{'type name' declared as a pointer to a reference of type 'T &'}}
+
namespace type_pack {
template<typename... Args>
@@ -71,16 +81,31 @@ namespace non_type_pack {
namespace PR47174 {
// This checks that we don't crash with a failed substitution on the first constrained argument when
// performing normalization.
-template <Bar2 T, True U>
+template <Bar2 T, True U> // #S3_Header
requires true struct S3; // expected-note {{template is declared here}}
template <True T, True U>
-requires true struct S3<T, U>; // expected-error {{class template partial specialization is not more specialized than the primary template}}
+requires true struct S3<T, U>;
+// expected-error@-1 {{class template partial specialization is not more specialized than the primary template}}
+// expected-error@#Foo2 2{{'type name' declared as a pointer to a reference of type 'T &'}}
+// expected-note@#SpecS2_1 {{while substituting into concept arguments here}}
+// expected-note@#SpecS2_2 {{while substituting into concept arguments here}}
+// expected-note@#S3_Header {{while substituting into concept arguments here}}
+// expected-note@#Bar2 {{while substituting into concept arguments here}}
+
// Same as above, for the second position (but this was already working).
-template <True T, Bar2 U>
-requires true struct S4; // expected-note {{template is declared here}}
+template <True T, Bar2 U> // #S4_Header
+requires true struct S4; // #S4
template <True T, True U>
-requires true struct S4<T, U>; // expected-error {{class template partial specialization is not more specialized than the primary template}}
+requires true struct S4<T, U>; // #S4-spec
+// expected-error@-1 {{class template partial specialization is not more specialized than the primary template}}
+// expected-error@#Foo2 {{'type name' declared as a pointer to a reference of type 'U &'}}
+// expected-note@#S4_Header {{while substituting into concept arguments here}}
+// expected-note@#S4 {{template is declared here}}
+// expected-note@#S4 {{similar constraint expressions not considered equivalent}}
+// expected-note@#S4-spec {{similar constraint expression here}}
+
+
struct X {
template<int> struct Y {
@@ -96,7 +121,7 @@ template<class T> requires C1<T> && C2<T> void t1() = delete; // expected-note {
template void t1<X>();
void t1() { t1<X>(); } // expected-error {{call to deleted function 't1'}}
-template<class T> requires C1<T> void t2() {}; // expected-note 2 {{candidate function}}
+template<class T> requires C1<T> void t2() {}; // expected-note 2 {{candidate function}}
template<class T> requires C2<T> void t2() {}; // expected-note 2 {{candidate function}}
template void t2<X>(); // expected-error {{partial ordering for explicit instantiation of 't2' is ambiguous}}
void t2() { t2<X>(); } // expected-error {{call to 't2' is ambiguous}}
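
The expanded expectations above reflect substitution failures inside concept arguments (here, forming T* where T is a reference) being diagnosed as hard errors at every point of use, each with a chain of "while substituting into concept arguments here" notes pointing at the marked lines, where the old test accepted a single error spelled with the canonical type-parameter-0-0 name. The trigger, reduced (names hypothetical):

template <typename T> concept AlwaysTrue = true;
template <typename T> concept PtrOk = AlwaysTrue<T*>; // error lands here when T
                                                      // is a reference: 'type
                                                      // name' declared as a
                                                      // pointer to a reference
template <typename T> concept RefOk = PtrOk<T&>;
template <typename T> requires RefOk<T> struct S {};
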
diff --git a/clang/test/CXX/temp/temp.param/p10-2a.cpp b/clang/test/CXX/temp/temp.param/p10-2a.cpp
index 4f5fdd3..c0406f8 100644
--- a/clang/test/CXX/temp/temp.param/p10-2a.cpp
+++ b/clang/test/CXX/temp/temp.param/p10-2a.cpp
@@ -86,16 +86,18 @@ using f1 = F<int>;
using f2 = F<long>; // expected-error {{constraints not satisfied for alias template 'F' [with T = long]}}
template<typename T, typename... Ts>
-concept OneOf = (is_same_v<T, Ts> || ...);
-// expected-note@-1 2{{because 'is_same_v<char, char[1]>' evaluated to false}}
-// expected-note@-2 2{{and 'is_same_v<char, char[2]>' evaluated to false}}
-// expected-note@-3 {{because 'is_same_v<short, int>' evaluated to false}}
-// expected-note@-4 {{and 'is_same_v<short, long>' evaluated to false}}
-// expected-note@-5 {{and 'is_same_v<short, char>' evaluated to false}}
-// expected-note@-6 3{{because 'is_same_v<int, char[1]>' evaluated to false}}
-// expected-note@-7 3{{and 'is_same_v<int, char[2]>' evaluated to false}}
-// expected-note@-8 2{{because 'is_same_v<std::nullptr_t, char>' evaluated to false}}
-// expected-note@-9 2{{and 'is_same_v<std::nullptr_t, int>' evaluated to false}}
+concept OneOf = (is_same_v<T, Ts> || ...); // #OneOf
+// expected-note@#OneOf 2{{because 'is_same_v<char, char[1]>' evaluated to false}}
+// expected-note@#OneOf 2{{and 'is_same_v<char, char[2]>' evaluated to false}}
+// expected-note@#OneOf {{because 'is_same_v<short, int>' evaluated to false}}
+// expected-note@#OneOf {{and 'is_same_v<short, long>' evaluated to false}}
+// expected-note@#OneOf {{and 'is_same_v<short, char>' evaluated to false}}
+// expected-note@#OneOf 3{{because 'is_same_v<int, char[1]>' evaluated to false}}
+// expected-note@#OneOf 3{{and 'is_same_v<int, char[2]>' evaluated to false}}
+// expected-note@#OneOf {{because 'is_same_v<decltype(nullptr), char>' evaluated to false}}
+// expected-note@#OneOf {{because 'is_same_v<std::nullptr_t, char>' evaluated to false}}
+// expected-note@#OneOf {{and 'is_same_v<std::nullptr_t, int>' evaluated to false}}
+// expected-note@#OneOf {{and 'is_same_v<decltype(nullptr), int>' evaluated to false}}
template<OneOf<char[1], char[2]> T, OneOf<int, long, char> U>
// expected-note@-1 2{{because 'OneOf<char, char[1], char[2]>' evaluated to false}}
@@ -124,6 +126,7 @@ using I = int;
using i1 = I<1>;
using i2 = I<'a'>;
+// FIXME: This crashes with -std=c++2c
using i3 = I<nullptr>;
// expected-error@-1 {{constraints not satisfied for alias template 'I' [with x = nullptr]}}
diff --git a/clang/test/CodeGen/X86/avx-cxx-record.cpp b/clang/test/CodeGen/X86/avx-cxx-record.cpp
index 6ce6815..b20bcdd 100644
--- a/clang/test/CodeGen/X86/avx-cxx-record.cpp
+++ b/clang/test/CodeGen/X86/avx-cxx-record.cpp
@@ -1,7 +1,9 @@
// RUN: %clang_cc1 %s -triple x86_64-unknown-linux-gnu -emit-llvm -O2 -target-cpu x86-64-v3 -o - | FileCheck %s
// RUN: %clang_cc1 %s -triple x86_64-unknown-linux-gnu -emit-llvm -O2 -target-cpu x86-64-v3 -fclang-abi-compat=20 -o - | FileCheck --check-prefix CLANG-20 %s
+// RUN: %clang_cc1 %s -triple x86_64-sie-ps4 -emit-llvm -O2 -target-cpu x86-64-v3 -o - | FileCheck --check-prefix CLANG-20 %s
using UInt64x2 = unsigned long long __attribute__((__vector_size__(16), may_alias));
+using UInt64x4 = unsigned long long __attribute__((__vector_size__(32), may_alias));
template<int id>
struct XMM1 {
@@ -23,3 +25,24 @@ XMM2 foo() {
((XMM1<1>*)&result)->x = UInt64x2{3, 4};
return result;
}
+
+template<int id>
+struct YMM1 {
+ UInt64x4 x;
+};
+
+struct YMM2 : YMM1<0>, YMM1<1> {
+};
+
+// CHECK: define{{.*}} @_Z3barv({{.*}} [[ARG:%.*]]){{.*}}
+// CLANG-20: define{{.*}} <8 x double> @_Z3barv()
+// CHECK: entry:
+// CHECK-NEXT: store {{.*}}, ptr [[ARG]]{{.*}}
+// CHECK-NEXT: [[TMP1:%.*]] = getelementptr {{.*}}, ptr [[ARG]]{{.*}}
+// CHECK-NEXT: store {{.*}}, ptr [[TMP1]]{{.*}}
+YMM2 bar() {
+ YMM2 result;
+ ((YMM1<0>*)&result)->x = UInt64x4{1, 2, 3, 4};
+ ((YMM1<1>*)&result)->x = UInt64x4{5, 6, 7, 8};
+ return result;
+}
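
The added RUN line and the YMM2 case pin down the return convention for a record built from two 256-bit vector bases: current Clang returns it indirectly (the CHECK lines match stores through the sret argument), while -fclang-abi-compat=20 and the x86_64-sie-ps4 triple keep the Clang 20 behavior of returning it in a vector register as <8 x double>. A caller-side sketch of the consequence (function name hypothetical):

// Same C++, different calling convention: under the current ABI the caller
// passes a hidden result pointer; under -fclang-abi-compat=20 (and on PS4)
// the value comes back in YMM registers.
YMM2 use_bar() { return bar(); }
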
diff --git a/clang/test/CodeGen/X86/avx512ifma-builtins.c b/clang/test/CodeGen/X86/avx512ifma-builtins.c
index 7c7c492..eebefb0 100644
--- a/clang/test/CodeGen/X86/avx512ifma-builtins.c
+++ b/clang/test/CodeGen/X86/avx512ifma-builtins.c
@@ -3,6 +3,11 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
#include <immintrin.h>
diff --git a/clang/test/CodeGen/X86/avx512ifmavl-builtins.c b/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
index c115b60..89108fc 100644
--- a/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
@@ -3,6 +3,12 @@
// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
+
#include <immintrin.h>
__m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
diff --git a/clang/test/CodeGen/X86/avxifma-builtins.c b/clang/test/CodeGen/X86/avxifma-builtins.c
index dd0f220..aa15159 100644
--- a/clang/test/CodeGen/X86/avxifma-builtins.c
+++ b/clang/test/CodeGen/X86/avxifma-builtins.c
@@ -3,6 +3,12 @@
// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s
// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
+
+
#include <immintrin.h>
__m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
diff --git a/clang/test/CodeGenHLSL/RootSignature.hlsl b/clang/test/CodeGenHLSL/RootSignature.hlsl
index bc40bdd..eaff3a9 100644
--- a/clang/test/CodeGenHLSL/RootSignature.hlsl
+++ b/clang/test/CodeGenHLSL/RootSignature.hlsl
@@ -82,8 +82,8 @@ void RootDescriptorsEntry() {}
// checking minLOD, maxLOD
// CHECK-SAME: float -1.280000e+02, float 1.280000e+02,
-// checking register, space and visibility
-// CHECK-SAME: i32 42, i32 0, i32 0}
+// checking register, space, visibility and flag
+// CHECK-SAME: i32 42, i32 0, i32 0, i32 1}
#define SampleStaticSampler \
"StaticSampler(s42, " \
@@ -96,6 +96,7 @@ void RootDescriptorsEntry() {}
" borderColor = STATIC_BORDER_COLOR_OPAQUE_WHITE, " \
" minLOD = -128.f, maxLOD = 128.f, " \
" space = 0, visibility = SHADER_VISIBILITY_ALL, " \
+ " flags = UINT_BORDER_COLOR" \
")"
[shader("compute"), RootSignature(SampleStaticSampler)]
[numthreads(1,1,1)]
diff --git a/clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl b/clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl
index 472b9a8..9f0a5b7 100644
--- a/clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl
+++ b/clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl
@@ -1,23 +1,36 @@
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.2-compute -finclude-default-header -fnative-half-type -emit-llvm -o - %s | FileCheck %s -check-prefixes=CHECK
// RUN: %clang_cc1 -triple spirv-unknown-vulkan1.3-compute -finclude-default-header -fnative-half-type -emit-llvm -o - %s | FileCheck %s -check-prefixes=SPV
-// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", i16, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.0" = type { target("dx.RawBuffer", i16, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.1" = type { target("dx.RawBuffer", i32, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.2" = type { target("dx.RawBuffer", i32, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.3" = type { target("dx.RawBuffer", i64, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.4" = type { target("dx.RawBuffer", i64, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.5" = type { target("dx.RawBuffer", half, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.6" = type { target("dx.RawBuffer", float, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.7" = type { target("dx.RawBuffer", double, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.8" = type { target("dx.RawBuffer", <4 x i16>, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.9" = type { target("dx.RawBuffer", <3 x i32>, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.10" = type { target("dx.RawBuffer", <2 x half>, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.11" = type { target("dx.RawBuffer", <3 x float>, 1, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer.12" = type { target("dx.RawBuffer", i32, 1, 0) }
-// SPV: %"class.hlsl::RWStructuredBuffer.12" = type { target("spirv.VulkanBuffer", [0 x i32], 12, 1)
-// CHECK: %"class.hlsl::RWStructuredBuffer.13" = type { target("dx.RawBuffer", <4 x i32>, 1, 0) }
-// SPV: %"class.hlsl::RWStructuredBuffer.13" = type { target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1)
+// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", i16, 1, 0), target("dx.RawBuffer", i16, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer" = type { target("spirv.VulkanBuffer", [0 x i16], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.0" = type { target("dx.RawBuffer", i16, 1, 0), target("dx.RawBuffer", i16, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.0" = type { target("spirv.VulkanBuffer", [0 x i16], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.1" = type { target("dx.RawBuffer", i32, 1, 0), target("dx.RawBuffer", i32, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.1" = type { target("spirv.VulkanBuffer", [0 x i32], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.2" = type { target("dx.RawBuffer", i32, 1, 0), target("dx.RawBuffer", i32, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.2" = type { target("spirv.VulkanBuffer", [0 x i32], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.3" = type { target("dx.RawBuffer", i64, 1, 0), target("dx.RawBuffer", i64, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.3" = type { target("spirv.VulkanBuffer", [0 x i64], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.4" = type { target("dx.RawBuffer", i64, 1, 0), target("dx.RawBuffer", i64, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.4" = type { target("spirv.VulkanBuffer", [0 x i64], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.5" = type { target("dx.RawBuffer", half, 1, 0), target("dx.RawBuffer", half, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.5" = type { target("spirv.VulkanBuffer", [0 x half], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.6" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.6" = type { target("spirv.VulkanBuffer", [0 x float], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.7" = type { target("dx.RawBuffer", double, 1, 0), target("dx.RawBuffer", double, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.7" = type { target("spirv.VulkanBuffer", [0 x double], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.8" = type { target("dx.RawBuffer", <4 x i16>, 1, 0), target("dx.RawBuffer", <4 x i16>, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.8" = type { target("spirv.VulkanBuffer", [0 x <4 x i16>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.9" = type { target("dx.RawBuffer", <3 x i32>, 1, 0), target("dx.RawBuffer", <3 x i32>, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.9" = type { target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.10" = type { target("dx.RawBuffer", <2 x half>, 1, 0), target("dx.RawBuffer", <2 x half>, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.10" = type { target("spirv.VulkanBuffer", [0 x <2 x half>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.11" = type { target("dx.RawBuffer", <3 x float>, 1, 0), target("dx.RawBuffer", <3 x float>, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.11" = type { target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.12" = type { target("dx.RawBuffer", i32, 1, 0), target("dx.RawBuffer", i32, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.12" = type { target("spirv.VulkanBuffer", [0 x i32], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
+// CHECK: %"class.hlsl::RWStructuredBuffer.13" = type { target("dx.RawBuffer", <4 x i32>, 1, 0), target("dx.RawBuffer", <4 x i32>, 1, 0) }
+// SPV: %"class.hlsl::RWStructuredBuffer.13" = type { target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) }
RWStructuredBuffer<int16_t> BufI16;
RWStructuredBuffer<uint16_t> BufU16;
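
Every structured-buffer expectation in this file gains a second member: alongside the handle for the elements, RWStructuredBuffer now carries a separate counter handle - on DXIL a second identical dx.RawBuffer handle, on SPIR-V a distinct i32 spirv.VulkanBuffer - backing IncrementCounter/DecrementCounter. A rough model of the lowered layout (member names hypothetical, not the real HLSL headers):

// Conceptual sketch only; the real members are opaque resource handles.
template <typename T> struct RWStructuredBufferModel {
  void *handle;         // target("dx.RawBuffer", T, 1, 0): the elements
  void *counter_handle; // same handle type on DXIL; an i32 buffer on SPIR-V
};
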
diff --git a/clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl b/clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl
index 6c5a705..c97ad42 100644
--- a/clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl
+++ b/clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl
@@ -5,19 +5,19 @@ struct MyStruct {
int2 b;
};
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer" = type { target("dx.RawBuffer", i16, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.0" = type { target("dx.RawBuffer", i16, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.1" = type { target("dx.RawBuffer", i32, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.2" = type { target("dx.RawBuffer", i32, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.3" = type { target("dx.RawBuffer", i64, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.4" = type { target("dx.RawBuffer", i64, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.5" = type { target("dx.RawBuffer", half, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.6" = type { target("dx.RawBuffer", float, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.7" = type { target("dx.RawBuffer", double, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.8" = type { target("dx.RawBuffer", <4 x i16>, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.9" = type { target("dx.RawBuffer", <3 x i32>, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.10" = type { target("dx.RawBuffer", <2 x half>, 1, 1) }
-// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.11" = type { target("dx.RawBuffer", <3 x float>, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer" = type { target("dx.RawBuffer", i16, 1, 1), target("dx.RawBuffer", i16, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.0" = type { target("dx.RawBuffer", i16, 1, 1), target("dx.RawBuffer", i16, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.1" = type { target("dx.RawBuffer", i32, 1, 1), target("dx.RawBuffer", i32, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.2" = type { target("dx.RawBuffer", i32, 1, 1), target("dx.RawBuffer", i32, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.3" = type { target("dx.RawBuffer", i64, 1, 1), target("dx.RawBuffer", i64, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.4" = type { target("dx.RawBuffer", i64, 1, 1), target("dx.RawBuffer", i64, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.5" = type { target("dx.RawBuffer", half, 1, 1), target("dx.RawBuffer", half, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.6" = type { target("dx.RawBuffer", float, 1, 1), target("dx.RawBuffer", float, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.7" = type { target("dx.RawBuffer", double, 1, 1), target("dx.RawBuffer", double, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.8" = type { target("dx.RawBuffer", <4 x i16>, 1, 1), target("dx.RawBuffer", <4 x i16>, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.9" = type { target("dx.RawBuffer", <3 x i32>, 1, 1), target("dx.RawBuffer", <3 x i32>, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.10" = type { target("dx.RawBuffer", <2 x half>, 1, 1), target("dx.RawBuffer", <2 x half>, 1, 1) }
+// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.11" = type { target("dx.RawBuffer", <3 x float>, 1, 1), target("dx.RawBuffer", <3 x float>, 1, 1) }
// DXIL: %struct.MyStruct = type <{ <4 x float>, <2 x i32> }>
RasterizerOrderedStructuredBuffer<int16_t> BufI16;
diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl
index 4f005ea..89a66b0 100644
--- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl
+++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl
@@ -21,8 +21,8 @@ export void foo() {
}
// CHECK-DXIL: %"class.hlsl::StructuredBuffer" = type { target("dx.RawBuffer", float, 0, 0) }
-// CHECK-DXIL: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) }
-// CHECK-DXIL: %"class.hlsl::AppendStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) }
+// CHECK-DXIL: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) }
+// CHECK-DXIL: %"class.hlsl::AppendStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) }
// CHECK: @Buf1 = internal global %"class.hlsl::StructuredBuffer" poison, align 4
// CHECK: @[[Buf1Str:.*]] = private unnamed_addr constant [5 x i8] c"Buf1\00", align 1
diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
index 93aa218..43ddd2e 100644
--- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
+++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl
@@ -10,9 +10,9 @@ AppendStructuredBuffer<float> ASB : register(u2);
ConsumeStructuredBuffer<float> CSB : register(u3);
// CHECK: %"class.hlsl::StructuredBuffer" = type { target("dx.RawBuffer", float, 0, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) }
-// CHECK: %"class.hlsl::AppendStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) }
-// CHECK: %"class.hlsl::ConsumeStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) }
+// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) }
+// CHECK: %"class.hlsl::AppendStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) }
+// CHECK: %"class.hlsl::ConsumeStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) }
export int TestIncrementCounter() {
return RWSB1.IncrementCounter();
diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
index b513963..9e08a6d 100644
--- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
+++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl
@@ -6,7 +6,7 @@
RWStructuredBuffer<float> RWSB1, RWSB2;
RasterizerOrderedStructuredBuffer<float> ROSB1, ROSB2;
-// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) }
+// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) }
export void TestIncrementCounter() {
// CHECK: define void @_Z20TestIncrementCounterv()
diff --git a/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl b/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl
index 4ffa7cf..1d85048 100644
--- a/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl
+++ b/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl
@@ -4,7 +4,7 @@
// CHECK: %"class.hlsl::RWBuffer" = type { target("dx.TypedBuffer", <4 x float>, 1, 0, 0) }
// CHECK: %"class.hlsl::RWBuffer.0" = type { target("dx.TypedBuffer", float, 1, 0, 0) }
// CHECK: %"class.hlsl::StructuredBuffer" = type { target("dx.RawBuffer", i32, 0, 0) }
-// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", %struct.S, 1, 0) }
+// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", %struct.S, 1, 0), target("dx.RawBuffer", %struct.S, 1, 0) }
// CHECK: %"class.hlsl::RWBuffer.1" = type { target("dx.TypedBuffer", double, 1, 0, 0) }
// CHECK: @_ZL4U0S0 = internal global %"class.hlsl::RWBuffer" poison, align 4
diff --git a/clang/test/Driver/riscv-cpus.c b/clang/test/Driver/riscv-cpus.c
index cd92adc..5d5fdd7 100644
--- a/clang/test/Driver/riscv-cpus.c
+++ b/clang/test/Driver/riscv-cpus.c
@@ -462,7 +462,6 @@
// MCPU-SIFIVE-P450-SAME: "-target-feature" "+ziccif"
// MCPU-SIFIVE-P450-SAME: "-target-feature" "+zicclsm"
// MCPU-SIFIVE-P450-SAME: "-target-feature" "+ziccrse"
-// MCPU-SIFIVE-P450-SAME: "-target-feature" "+zicntr"
// MCPU-SIFIVE-P450-SAME: "-target-feature" "+zicsr"
// MCPU-SIFIVE-P450-SAME: "-target-feature" "+zifencei"
// MCPU-SIFIVE-P450-SAME: "-target-feature" "+zihintntl"
@@ -492,7 +491,6 @@
// MCPU-SIFIVE-P470-SAME: "-target-feature" "+ziccif"
// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicclsm"
// MCPU-SIFIVE-P470-SAME: "-target-feature" "+ziccrse"
-// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicntr"
// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicsr"
// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zifencei"
// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zihintntl"
@@ -555,7 +553,6 @@
// MCPU-SIFIVE-P670-SAME: "-target-feature" "+ziccif"
// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicclsm"
// MCPU-SIFIVE-P670-SAME: "-target-feature" "+ziccrse"
-// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicntr"
// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicsr"
// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zifencei"
// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zihintntl"
diff --git a/clang/test/OpenMP/amdgcn_save_temps.c b/clang/test/OpenMP/amdgcn_save_temps.c
new file mode 100644
index 0000000..d838bb1
--- /dev/null
+++ b/clang/test/OpenMP/amdgcn_save_temps.c
@@ -0,0 +1,23 @@
+
+// REQUIRES: amdgpu-registered-target
+
+// RUN: %clang_cc1 -E -fopenmp -x c -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd %s -o %t-openmp-amdgcn-amd-amdhsa-gfx90a.i
+// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd -emit-llvm-bc %s -o %t-x86_64-unknown-unknown.bc
+// RUN: %clang_cc1 -fopenmp -x c -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd -emit-llvm -fopenmp-is-target-device -x cpp-output %t-openmp-amdgcn-amd-amdhsa-gfx90a.i -fopenmp-host-ir-file-path %t-x86_64-unknown-unknown.bc -o - | FileCheck %s
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+#define N 1000
+
+int test_amdgcn_save_temps() {
+ int arr[N];
+#pragma omp target
+ for (int i = 0; i < N; i++) {
+ arr[i] = 1;
+ }
+ return arr[0];
+}
+#endif
+
+// CHECK: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_amdgcn_save_temps
diff --git a/clang/test/Parser/recovery-after-expected-unqualified-id.cpp b/clang/test/Parser/recovery-after-expected-unqualified-id.cpp
new file mode 100644
index 0000000..8019b46d
--- /dev/null
+++ b/clang/test/Parser/recovery-after-expected-unqualified-id.cpp
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -verify %s
+
+3.2 // expected-error {{expected unqualified-id}}
+
+extern "C" {
+ typedef int Int;
+}
+
+Int foo(); // Ok
diff --git a/clang/test/Sema/const-eval.c b/clang/test/Sema/const-eval.c
index 11cc7fb..53face9 100644
--- a/clang/test/Sema/const-eval.c
+++ b/clang/test/Sema/const-eval.c
@@ -138,7 +138,7 @@ EVAL_EXPR(52, &pr24622 == (void *)&PR24622);
// We evaluate these by providing 2s' complement semantics in constant
// expressions, like we do for integers.
-void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // expected-warning {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2305843009213693952 elements)}}
+void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // expected-warning {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2'305'843'009'213'693'952 elements)}}
void *PR28739b = &PR28739b + (__int128)(unsigned long)-1; // expected-warning {{refers past the last possible element}}
__int128 PR28739c = (&PR28739c + (__int128)(unsigned long)-1) - &PR28739c; // expected-warning {{refers past the last possible element}}
void *PR28739d = &(&PR28739d)[(__int128)(unsigned long)-1]; // expected-warning {{refers past the last possible element}}
diff --git a/clang/test/Sema/integer-overflow.c b/clang/test/Sema/integer-overflow.c
index 30a47aa..ba943f0 100644
--- a/clang/test/Sema/integer-overflow.c
+++ b/clang/test/Sema/integer-overflow.c
@@ -143,7 +143,7 @@ uint64_t check_integer_overflows(int i) {
(__imag__ x) = 4608 * 1024 * 1024;
// expected-warning@+4 {{overflow in expression; result is 536'870'912 with type 'int'}}
-// expected-warning@+3 {{array index 536870912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}}
+// expected-warning@+3 {{array index 536'870'912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}}
// expected-note@+1 {{array 'a' declared here}}
uint64_t a[10];
a[4608 * 1024 * 1024] = 1i;
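
This and the neighboring Sema tests change only in diagnostic formatting: large integers in Clang diagnostics are now printed with digit-group separators, so 536870912 becomes 536'870'912 and 18446744073709551615 becomes 18'446'744'073'709'551'615. A self-contained sketch of the new output (warning text abbreviated):

typedef unsigned long long uint64_t;
uint64_t a[10];
void f(void) {
  a[4608 * 1024 * 1024] = 1;
  // warning: overflow in expression; result is 536'870'912 with type 'int'
  // warning: array index 536'870'912 is past the end of the array
}
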
diff --git a/clang/test/Sema/unbounded-array-bounds.c b/clang/test/Sema/unbounded-array-bounds.c
index b22261a..909286b 100644
--- a/clang/test/Sema/unbounded-array-bounds.c
+++ b/clang/test/Sema/unbounded-array-bounds.c
@@ -14,11 +14,11 @@ struct S s[]; // expected-warning {{tentative array definition}} expected-note {
void f1(void) {
++s[3].a;
++s[7073650413200313099].b;
- // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}}
- // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}}
- // addr64-warning@-3 {{array index 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}}
+ // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}}
+ // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178'956'970 elements)}}
+ // addr64-warning@-3 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}}
++s[7073650].c;
- // addr16-warning@-1 {{array index 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}}
+ // addr16-warning@-1 {{array index 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}}
}
long long ll[]; // expected-warning {{tentative array definition}} expected-note {{declared here}} addr16-note {{declared here}} addr32-note {{declared here}}
@@ -26,32 +26,32 @@ long long ll[]; // expected-warning {{tentative array definition}} expected-note
void f2(void) {
++ll[3];
++ll[2705843009213693952];
- // addr16-warning@-1 {{array index 2705843009213693952 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8192 elements)}}
- // addr32-warning@-2 {{array index 2705843009213693952 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536870912 elements)}}
- // addr64-warning@-3 {{array index 2705843009213693952 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2305843009213693952 elements)}}
+ // addr16-warning@-1 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8'192 elements)}}
+ // addr32-warning@-2 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536'870'912 elements)}}
+ // addr64-warning@-3 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2'305'843'009'213'693'952 elements)}}
++ll[847073650];
- // addr16-warning@-1 {{array index 847073650 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8192 elements)}}
- // addr32-warning@-2 {{array index 847073650 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536870912 elements)}}
+ // addr16-warning@-1 {{array index 847'073'650 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8'192 elements)}}
+ // addr32-warning@-2 {{array index 847'073'650 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536'870'912 elements)}}
}
void f3(struct S p[]) { // expected-note {{declared here}} addr16-note {{declared here}}
++p[3].a;
++p[7073650413200313099].b;
- // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}}
- // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}}
- // addr64-warning@-3 {{array index 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}}
+ // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}}
+ // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178'956'970 elements)}}
+ // addr64-warning@-3 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}}
++p[7073650].c;
- // addr16-warning@-1 {{array index 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}}
+ // addr16-warning@-1 {{array index 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}}
}
void f4(struct S *p) { // expected-note {{declared here}} addr16-note {{declared here}}
p += 3;
p += 7073650413200313099;
- // addr16-warning@-1 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}}
- // addr32-warning@-2 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}}
- // addr64-warning@-3 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}}
+ // addr16-warning@-1 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}}
+ // addr32-warning@-2 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178'956'970 elements)}}
+ // addr64-warning@-3 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}}
p += 7073650;
- // addr16-warning@-1 {{the pointer incremented by 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}}
+ // addr16-warning@-1 {{the pointer incremented by 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}}
}
struct BQ {
@@ -63,7 +63,7 @@ struct BQ bq[]; // expected-warning {{tentative array definition}} addr16-note {
void f5(void) {
++bq[0].bigblock[0].a;
++bq[1].bigblock[0].a;
- // addr16-warning@-1 {{array index 1 refers past the last possible element for an array in 16-bit address space containing 497952-bit (62244-byte) elements (max possible 1 element)}}
+ // addr16-warning@-1 {{array index 1 refers past the last possible element for an array in 16-bit address space containing 497952-bit (62'244-byte) elements (max possible 1 element)}}
}
void f6(void) {
@@ -102,15 +102,15 @@ struct {
void fam_ily() {
++fam.tail[7073650413200313099];
- // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}}
- // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}}
+ // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}}
+ // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4'294'967'296 elements)}}
// No warning for addr64 because the array index is in bounds in that case.
++fam0.tail[7073650413200313099];
- // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}}
- // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}}
+ // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}}
+ // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4'294'967'296 elements)}}
// No warning for addr64 because the array index is in bounds in that case.
++fam1.tail[7073650413200313099];
- // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}}
- // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}}
+ // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}}
+ // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4'294'967'296 elements)}}
// No warning for addr64 because the array index is in bounds in that case.
}
diff --git a/clang/test/SemaCXX/array-bounds.cpp b/clang/test/SemaCXX/array-bounds.cpp
index b584e1e..6a40d1d 100644
--- a/clang/test/SemaCXX/array-bounds.cpp
+++ b/clang/test/SemaCXX/array-bounds.cpp
@@ -237,7 +237,7 @@ void test_pr10771() {
((char*)foo)[sizeof(foo) - 1] = '\0'; // no-warning
*(((char*)foo) + sizeof(foo) - 1) = '\0'; // no-warning
- ((char*)foo)[sizeof(foo)] = '\0'; // expected-warning {{array index 32768 is past the end of the array (that has type 'double[4096]', cast to 'char *')}}
+ ((char*)foo)[sizeof(foo)] = '\0'; // expected-warning {{array index 32'768 is past the end of the array (that has type 'double[4096]', cast to 'char *')}}
// TODO: This should probably warn, too.
*(((char*)foo) + sizeof(foo)) = '\0'; // no-warning
@@ -248,7 +248,7 @@ int test_pr11007_aux(const char * restrict, ...);
// Test checking with varargs.
void test_pr11007() {
double a[5]; // expected-note {{array 'a' declared here}}
- test_pr11007_aux("foo", a[1000]); // expected-warning {{array index 1000 is past the end of the array (that has type 'double[5]')}}
+ test_pr11007_aux("foo", a[1000]); // expected-warning {{array index 1'000 is past the end of the array (that has type 'double[5]')}}
}
void test_rdar10916006(void)
diff --git a/clang/test/SemaCXX/constant-expression-cxx14.cpp b/clang/test/SemaCXX/constant-expression-cxx14.cpp
index 1743e0e..bea90ff 100644
--- a/clang/test/SemaCXX/constant-expression-cxx14.cpp
+++ b/clang/test/SemaCXX/constant-expression-cxx14.cpp
@@ -1047,7 +1047,7 @@ constexpr int S = sum(Cs); // expected-error{{must be initialized by a constant
constexpr void PR28739(int n) { // cxx14_20-error {{never produces a constant}}
int *p = &n; // expected-note {{array 'p' declared here}}
p += (__int128)(unsigned long)-1; // cxx14_20-note {{cannot refer to element 18446744073709551615 of non-array object in a constant expression}}
- // expected-warning@-1 {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 32-bit (4-byte) elements (max possible 4611686018427387904 elements)}}
+ // expected-warning@-1 {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 32-bit (4-byte) elements (max possible 4'611'686'018'427'387'904 elements)}}
}
constexpr void Void(int n) {
diff --git a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
index 2f1817d..404b928 100644
--- a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
+++ b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
@@ -586,3 +586,35 @@ Baz a{};
static_assert(__is_same(decltype(a), A<A<int>>));
} // namespace GH133132
+
+namespace GH131408 {
+
+struct Node {};
+
+template <class T, Node>
+struct A {
+ A(T) {}
+};
+
+template <class T>
+using AA = A<T, {}>;
+
+AA a{0};
+
+static_assert(__is_same(decltype(a), A<int, Node{}>));
+}
+
+namespace GH130604 {
+template <typename T> struct A {
+ A(T);
+};
+
+template <typename T, template <typename> class TT = A> using Alias = TT<T>; // #gh130604-alias
+template <typename T> using Alias2 = Alias<T>;
+
+Alias2 a(42);
+// expected-error@-1 {{no viable constructor or deduction guide for deduction of template arguments of 'Alias2'}}
+Alias b(42);
+// expected-error@-1 {{alias template 'Alias' requires template arguments; argument deduction only allowed for class templates or alias template}}
+// expected-note@#gh130604-alias {{template is declared here}}
+}
diff --git a/clang/test/SemaCXX/cxx23-assume.cpp b/clang/test/SemaCXX/cxx23-assume.cpp
index 99a82d9..ce86266 100644
--- a/clang/test/SemaCXX/cxx23-assume.cpp
+++ b/clang/test/SemaCXX/cxx23-assume.cpp
@@ -127,13 +127,12 @@ struct F {
template <typename T>
constexpr int f5() requires C<T> { return 1; } // expected-note {{while checking the satisfaction}}
- // expected-note@-1 {{while substituting template arguments}}
- // expected-note@-2 {{candidate template ignored}}
+ // expected-note@-1 {{candidate template ignored}}
template <typename T>
-constexpr int f5() requires (!C<T>) { return 2; } // expected-note 4 {{while checking the satisfaction}}
- // expected-note@-1 4 {{while substituting template arguments}}
- // expected-note@-2 {{candidate template ignored}}
+constexpr int f5() requires (!C<T>) { return 2; } // expected-note 4 {{while checking the satisfaction}} \
+ // expected-note 4 {{while substituting template arguments}} \
+ // expected-note {{candidate template ignored}}
static_assert(f5<int>() == 1);
static_assert(f5<D>() == 1); // expected-note 3 {{while checking constraint satisfaction}}
diff --git a/clang/test/SemaCXX/cxx2b-deducing-this.cpp b/clang/test/SemaCXX/cxx2b-deducing-this.cpp
index 74b3573..6777dc2 100644
--- a/clang/test/SemaCXX/cxx2b-deducing-this.cpp
+++ b/clang/test/SemaCXX/cxx2b-deducing-this.cpp
@@ -1257,13 +1257,13 @@ void f() {
(&A::e)(a, a);
// expected-error@-1 {{no matching function for call to 'e'}} \
// expected-note@#tpl-address-e{{candidate template ignored: constraints not satisfied [with T = A, U = A]}} \
- // expected-note@#tpl-address-e{{because '__is_same(tpl_address::A, int)' evaluated to false}}
+ // expected-note@#tpl-address-e{{because '__is_same(A, int)' evaluated to false}}
(&A::e<A>)(a, 0);
(&A::e<A>)(a, a);
// expected-error@-1 {{no matching function for call to 'e'}} \
// expected-note@#tpl-address-e{{candidate template ignored: constraints not satisfied [with T = A, U = A]}} \
- // expected-note@#tpl-address-e{{because '__is_same(tpl_address::A, int)' evaluated to false}}
+ // expected-note@#tpl-address-e{{because '__is_same(A, int)' evaluated to false}}
(&A::e<A, int>)(a, 0);
@@ -1273,12 +1273,12 @@ void f() {
(&A::f<A>)(a);
// expected-error@-1 {{no matching function for call to 'f'}} \
// expected-note@#tpl-address-f{{candidate template ignored: constraints not satisfied [with T = A]}} \
- // expected-note@#tpl-address-f{{because '__is_same(tpl_address::A, int)' evaluated to false}}
+ // expected-note@#tpl-address-f{{because '__is_same(A, int)' evaluated to false}}
(&A::f)(a);
// expected-error@-1 {{no matching function for call to 'f'}} \
// expected-note@#tpl-address-f{{candidate template ignored: constraints not satisfied [with T = A]}} \
- // expected-note@#tpl-address-f{{because '__is_same(tpl_address::A, int)' evaluated to false}}
+ // expected-note@#tpl-address-f{{because '__is_same(A, int)' evaluated to false}}
(&A::g)(a);
(&A::g)(a, 0);
diff --git a/clang/test/SemaCXX/cxx2c-fold-exprs.cpp b/clang/test/SemaCXX/cxx2c-fold-exprs.cpp
index 4220486..137f46e 100644
--- a/clang/test/SemaCXX/cxx2c-fold-exprs.cpp
+++ b/clang/test/SemaCXX/cxx2c-fold-exprs.cpp
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -std=c++2c -verify %s
-template <class T> concept A = true;
-template <class T> concept C = A<T> && true;
+template <class T> concept A = (T(), true);
+template <class T> concept C = A<T> && true; // #C
template <class T> concept D = A<T> && __is_same(T, int);
@@ -40,13 +40,23 @@ constexpr int i(T...) { return 1; }; // expected-note {{candidate}}
static_assert(i(0) == 1); // expected-error {{call to 'i' is ambiguous}}
-template <class... T> requires (A<T> || ... || true)
-constexpr int j(T...) { return 0; };
-template <class... T> requires (C<T> && ... && true)
-constexpr int j(T...) { return 1; };
+template <class... T> requires (A<T> || ... || true) constexpr int j(T...) { return 0; }; // #j1
+template <class... T> requires (C<T> && ... && true) constexpr int j(T...) { return 1; }; // #j2
static_assert(j(0) == 1);
+// expected-error@-1 {{call to 'j' is ambiguous}}
+// expected-note@#j1 {{candidate function [with T = <int>]}}
+// expected-note@#j2 {{candidate function [with T = <int>]}}
+// expected-note@#j2 {{similar constraint expressions not considered equivalent}}
+// expected-note@#j1 {{similar constraint expression here}}
+
+
static_assert(j() == 1);
+// expected-error@-1 {{call to 'j' is ambiguous}}
+// expected-note@#j1 {{candidate function [with T = <>]}}
+// expected-note@#j2 {{candidate function [with T = <>]}}
+// expected-note@#j2 {{similar constraint expressions not considered equivalent}}
+// expected-note@#j1 {{similar constraint expression here}}
@@ -107,7 +117,7 @@ void test() {
}
namespace substitution {
- struct S {
+struct S {
using type = int;
};
@@ -144,51 +154,69 @@ consteval int Or3() requires (C<typename T::type> || ... || C<typename U::type>)
static_assert(And1<>() == 1);
static_assert(And1<S>() == 1);
static_assert(And1<S, S>() == 1);
+// FIXME: The diagnostics are not so great
static_assert(And1<int>() == 1); // expected-error {{no matching function for call to 'And1'}}
- // expected-note@#and1 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and1 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and1 {{candidate template ignored: constraints not satisfied [with T = <int>]}}
+ // expected-note@#and1 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(And1<S, int>() == 1); // expected-error {{no matching function for call to 'And1'}}
- // expected-note@#and1 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and1 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and1 {{candidate template ignored: constraints not satisfied [with T = <S, int>]}}
+ // expected-note@#and1 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(And1<int, S>() == 1); // expected-error {{no matching function for call to 'And1'}}
- // expected-note@#and1 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and1 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and1 {{candidate template ignored: constraints not satisfied [with T = <int, S>]}}
+ // expected-note@#and1 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(And2<S>() == 2);
static_assert(And2<S, S>() == 2);
-static_assert(And2<int>() == 2);
+static_assert(And2<int>() == 2); // expected-error {{no matching function for call to 'And2'}}
+ // expected-note@#and2 {{candidate template ignored: constraints not satisfied [with T = int, U = <>]}}
+ // expected-note@#and2 {{because 'typename U::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
+
static_assert(And2<int, int>() == 2); // expected-error {{no matching function for call to 'And2'}}
- // expected-note@#and2 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and2 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and2 {{candidate template ignored: constraints not satisfied [with T = S, U = <int>]}} \
+ // expected-note@#and2 {{because 'typename U::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(And2<S, int>() == 2); // expected-error {{no matching function for call to 'And2'}}
- // expected-note@#and2 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and2 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and2 {{candidate template ignored: constraints not satisfied [with T = int, U = <S>]}}
+ // expected-note@#and2 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(And2<int, S>() == 2); // expected-error {{no matching function for call to 'And2'}}
- // expected-note@#and2 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and2 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and2 {{candidate template ignored: constraints not satisfied [with T = int, U = <int>]}}
+ // expected-note@#and2 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(And3<S>() == 3);
static_assert(And3<S, S>() == 3);
static_assert(And3<int>() == 3); // expected-error {{no matching function for call to 'And3'}}
- // expected-note@#and3 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and3 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and3 {{candidate template ignored: constraints not satisfied [with T = int, U = <>]}}
+ // expected-note@#and3 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
+
static_assert(And3<int, int>() == 3); // expected-error {{no matching function for call to 'And3'}}
- // expected-note@#and3 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and3 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and3 {{candidate template ignored: constraints not satisfied [with T = int, U = <int>]}}
+ // expected-note@#and3 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
+
static_assert(And3<S, int>() == 3); // expected-error {{no matching function for call to 'And3'}}
- // expected-note@#and3 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and3 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and3 {{candidate template ignored: constraints not satisfied [with T = S, U = <int>]}}
+ // expected-note@#and3 {{because 'typename U::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
+
static_assert(And3<int, S>() == 3); // expected-error {{no matching function for call to 'And3'}}
- // expected-note@#and3 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#and3 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#and3 {{candidate template ignored: constraints not satisfied [with T = int, U = <S>]}}
+ // expected-note@#and3 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(Or1<>() == 1); // expected-error {{no matching function for call to 'Or1'}}
@@ -198,25 +226,26 @@ static_assert(Or1<int, S>() == 1);
static_assert(Or1<S, int>() == 1);
static_assert(Or1<S, S>() == 1);
static_assert(Or1<int>() == 1); // expected-error {{no matching function for call to 'Or1'}}
- // expected-note@#or1 {{candidate template ignored: constraints not satisfied}} \
- // expected-note@#or1 {{because substituted constraint expression is ill-formed}}
-
+ // expected-note@#or1 {{candidate template ignored: constraints not satisfied}}
+ // expected-note@#or1 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(Or2<S>() == 2);
static_assert(Or2<int, S>() == 2);
static_assert(Or2<S, int>() == 2);
static_assert(Or2<S, S>() == 2);
static_assert(Or2<int>() == 2); // expected-error {{no matching function for call to 'Or2'}}
- // expected-note@#or2 {{candidate template ignored: constraints not satisfied}} \
- // expected-note@#or2 {{because substituted constraint expression is ill-formed}}
-
+ // expected-note@#or2 {{candidate template ignored: constraints not satisfied [with T = int, U = <>]}}
+ // expected-note@#or2 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
static_assert(Or3<S>() == 3);
static_assert(Or3<int, S>() == 3);
static_assert(Or3<S, int>() == 3);
static_assert(Or3<S, S>() == 3);
static_assert(Or3<int>() == 3); // expected-error {{no matching function for call to 'Or3'}}
- // expected-note@#or3 {{candidate template ignored: constraints not satisfied}} \
- // expected-note@#or3 {{because substituted constraint expression is ill-formed}}
+ // expected-note@#or3 {{candidate template ignored: constraints not satisfied}}
+ // expected-note@#or3 {{because 'typename T::type' does not satisfy 'C'}}
+ // expected-note@#C {{because 'T' does not satisfy 'A'}}
}
namespace bool_conversion_break {
@@ -226,7 +255,7 @@ struct Thingy {
static constexpr int compare(const Thingy&) {return 1;}
};
template <typename ...T, typename ...U>
-void f(A<T ...> *, A<U ...> *) // expected-note {{candidate template ignored: failed template argument deduction}}
+void f(A<T ...> *, A<U ...> *) // expected-note {{candidate template ignored: constraints not satisfied}}
requires (T::compare(U{}) && ...); // expected-error {{atomic constraint must be of type 'bool' (found 'int')}}
void g() {
@@ -269,9 +298,7 @@ struct S {
static_assert(S<int>::f<int>() == 2);
-static_assert(S<int>::g<int>() == 2); // expected-error {{call to 'g' is ambiguous}}
- // expected-note@#nested-ambiguous-g1 {{candidate}}
- // expected-note@#nested-ambiguous-g2 {{candidate}}
+static_assert(S<int>::g<int>() == 2);
}
@@ -384,3 +411,98 @@ struct LazyLitMatrix<index_by<Indices...>, init> {
}
}
+
+namespace GH135190 {
+template <typename T>
+concept A = __is_same_as(T, int) || __is_same_as(T, double);
+
+template <typename T>
+concept B = A<T> && __is_same_as(T, double);
+
+template <class... Ts>
+requires(A<Ts> && ...)
+constexpr int g() {
+ return 1;
+}
+
+template <class... Ts>
+requires(B<Ts> && ...)
+constexpr int g() {
+ return 2;
+}
+
+static_assert(g<double>() == 2);
+
+
+template <class... Ts>
+concept all_A = (A<Ts> && ...);
+
+template <class... Ts>
+concept all_B = (B<Ts> && ...);
+
+template <class... Ts>
+requires all_A<Ts...>
+constexpr int h() {
+ return 1;
+}
+
+template <class... Ts>
+requires all_B<Ts...>
+constexpr int h() {
+ return 2;
+}
+
+static_assert(h<double>() == 2);
+}
+
+
+namespace parameter_mapping_regressions {
+
+namespace case1 {
+namespace std {
+template <class _Tp, class... _Args>
+constexpr bool is_constructible_v = __is_constructible(_Tp, _Args...);
+template <class _Tp, class... _Args>
+concept constructible_from = is_constructible_v<_Tp, _Args...>;
+template <class _Tp>
+concept default_initializable = true;
+template <class> using iterator_t = int;
+template <class _Tp>
+concept view = constructible_from<_Tp, _Tp>;
+template <class... _Views>
+ requires(view<_Views> && ...)
+class zip_transform_view;
+} // namespace std
+struct IterDefaultCtrView {};
+template <class... Views>
+using Iter = std::iterator_t<std::zip_transform_view<Views...>>;
+static_assert(
+ std::default_initializable<Iter<IterDefaultCtrView, IterDefaultCtrView>>);
+
+}
+
+namespace case2 {
+
+template <class _Bp>
+constexpr bool False = false;
+
+template <class... _Views>
+concept __zip_all_random_access = (False<_Views> && ...);
+// expected-note@-1 {{evaluated to false}}
+
+template <typename... _Views>
+struct zip_view {
+ void f() requires __zip_all_random_access<_Views...>{};
+ // expected-note@-1 {{because 'int' does not satisfy}}
+};
+
+zip_view<int> test_v;
+static_assert(!__zip_all_random_access<int>);
+
+void test() {
+ test_v.f(); // expected-error {{invalid reference to function 'f'}}
+}
+
+}
+
+}
diff --git a/clang/test/SemaCXX/cxx2c-template-template-param.cpp b/clang/test/SemaCXX/cxx2c-template-template-param.cpp
index ed55a059..4ad3fd9 100644
--- a/clang/test/SemaCXX/cxx2c-template-template-param.cpp
+++ b/clang/test/SemaCXX/cxx2c-template-template-param.cpp
@@ -106,7 +106,7 @@ concept BinaryDefaultedFalse = false;
template <template <typename...> concept C, typename T>
struct S {
- template <C TT> // expected-note {{because 'int' does not satisfy 'UnaryFalse'}}
+ template <C TT> // expected-note 2{{because 'int' does not satisfy 'UnaryFalse'}}
void f(TT); // expected-note {{ignored}}
void g(C auto); // expected-note {{ignored}} \
// expected-note {{because 'int' does not satisfy 'UnaryFalse'}}
@@ -171,7 +171,7 @@ concept BinaryDefaultedFalse = false;
template <template <typename...> concept C, typename T>
struct S {
- template <C TT> // expected-note {{because 'int' does not satisfy 'UnaryFalse'}}
+ template <C TT> // expected-note 2{{because 'int' does not satisfy 'UnaryFalse'}}
void f(TT); // expected-note {{ignored}}
void g(C auto); // expected-note {{ignored}} \
// expected-note {{because 'int' does not satisfy 'UnaryFalse'}}
diff --git a/clang/test/SemaCXX/cxx98-compat.cpp b/clang/test/SemaCXX/cxx98-compat.cpp
index 8e7acf7..587c242 100644
--- a/clang/test/SemaCXX/cxx98-compat.cpp
+++ b/clang/test/SemaCXX/cxx98-compat.cpp
@@ -1,6 +1,7 @@
-// RUN: %clang_cc1 -fsyntax-only -std=c++11 -Wc++98-compat -verify %s
-// RUN: %clang_cc1 -fsyntax-only -std=c++14 -Wc++98-compat -verify %s -DCXX14COMPAT
-// RUN: %clang_cc1 -fsyntax-only -std=c++17 -Wc++98-compat -verify %s -DCXX14COMPAT -DCXX17COMPAT
+// RUN: %clang_cc1 -fsyntax-only -std=c++11 -Wc++98-compat -verify=expected,not-cpp20 %s
+// RUN: %clang_cc1 -fsyntax-only -std=c++14 -Wc++98-compat -verify=expected,not-cpp20 %s -DCXX14COMPAT
+// RUN: %clang_cc1 -fsyntax-only -std=c++17 -Wc++98-compat -verify=expected,not-cpp20 %s -DCXX14COMPAT -DCXX17COMPAT
+// RUN: %clang_cc1 -fsyntax-only -std=c++20 -Wc++98-compat -verify=expected,cpp20 %s -DCXX14COMPAT -DCXX17COMPAT
namespace std {
struct type_info;
@@ -226,7 +227,8 @@ void TrivialButNonPODThroughEllipsis() {
}
struct HasExplicitConversion {
- explicit operator bool(); // expected-warning {{explicit conversion functions are incompatible with C++98}}
+ // FIXME: I think we should generate this diagnostic in C++20
+ explicit operator bool(); // not-cpp20-warning {{explicit conversion functions are incompatible with C++98}}
};
struct Struct {};
@@ -430,3 +432,12 @@ void ctad_test() {
CTAD t = s; // expected-warning {{class template argument deduction is incompatible with C++ standards before C++17}}
}
#endif
+
+namespace GH161702 {
+struct S {
+ enum E { A };
+ using E::A; // expected-warning {{enumeration type in nested name specifier is incompatible with C++98}}
+ // not-cpp20-error@-1 {{using declaration refers to its own class}}
+ // cpp20-warning@-2 {{member using declaration naming non-class ''E'' enumerator is incompatible with C++ standards before C++20}}
+};
+}
diff --git a/clang/test/SemaCXX/integer-overflow.cpp b/clang/test/SemaCXX/integer-overflow.cpp
index 73a4e88..214dc11 100644
--- a/clang/test/SemaCXX/integer-overflow.cpp
+++ b/clang/test/SemaCXX/integer-overflow.cpp
@@ -171,7 +171,7 @@ uint64_t check_integer_overflows(int i) { //expected-note 0+{{declared here}}
uint64_t a[10];
a[4608 * 1024 * 1024] = 1;
#if __cplusplus < 201103L
-// expected-warning@-2 {{array index 536870912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}}
+// expected-warning@-2 {{array index 536'870'912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}}
// expected-note@-4 {{array 'a' declared here}}
#endif
diff --git a/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp b/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp
index 436dfb9..8400340 100644
--- a/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp
+++ b/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp
@@ -1,6 +1,6 @@
// RUN: %clang -fsyntax-only -std=c++2a -Xclang -verify -ftemplate-depth=5 -ftemplate-backtrace-limit=4 %s
-// RequiresExpr contains invalid requirement. (Eg. Highly recurisive template).
+// RequiresExpr contains an invalid requirement (e.g. a highly recursive template).
template<int x>
struct A { static constexpr bool far(); };
class B {
@@ -19,7 +19,7 @@ constexpr bool A<x>::far() {
// expected-error@#Invalid {{recursive template instantiation exceeded maximum depth}}
// expected-note@#Invalid 3 {{while}}
// expected-note@#Invalid {{contexts in backtrace}}
- // expected-note@#Invalid {{increase recursive template instantiation depth}}
+ // expected-note@#Invalid {{use -ftemplate-depth=N to increase}}
};
}
static_assert(A<1>::far());
diff --git a/clang/test/SemaCXX/overload-resolution-deferred-templates.cpp b/clang/test/SemaCXX/overload-resolution-deferred-templates.cpp
index 135865c..c3bda39 100644
--- a/clang/test/SemaCXX/overload-resolution-deferred-templates.cpp
+++ b/clang/test/SemaCXX/overload-resolution-deferred-templates.cpp
@@ -102,7 +102,7 @@ static_assert(__is_constructible(Movable, int));
// expected-error@-1 {{no matching constructor for initialization of 'Movable'}} \
// expected-note@-1 2{{}}
// expected-error@#err-self-constraint-1{{satisfaction of constraint '__is_constructible(Movable, T)' depends on itself}}
-// expected-note@#err-self-constraint-1 4{{}}
+// expected-note@#err-self-constraint-1 3{{}}
// expected-note@#Movable {{'Movable' defined here}}
template <typename T>
@@ -200,7 +200,6 @@ void h(short n) { f(n); }
// expected-note@-1{{while checking constraint satisfaction for template}}
// expected-note@#GH62096-note1{{in instantiation}}
// expected-note@#GH62096-note1{{while substituting template arguments into constraint expression here}}
-// expected-note@#GH62096-note2{{while substituting template arguments into constraint expression here}}
// expected-note@#GH62096-note2{{while checking the satisfaction of concept}}
// expected-note@#GH62096-err {{expression evaluates}}
}
diff --git a/clang/test/SemaCXX/type-traits.cpp b/clang/test/SemaCXX/type-traits.cpp
index d49330f..901d510 100644
--- a/clang/test/SemaCXX/type-traits.cpp
+++ b/clang/test/SemaCXX/type-traits.cpp
@@ -5129,12 +5129,12 @@ namespace GH121278 {
#if __cplusplus >= 202002L
template <typename B, typename D>
concept C = __is_base_of(B, D);
-// expected-error@-1 {{incomplete type 'GH121278::S' used in type trait expression}}
+// expected-error@-1 {{incomplete type 'S' used in type trait expression}}
// expected-note@-2 {{while substituting template arguments into constraint expression here}}
struct T;
struct S;
bool b = C<T, S>;
-// expected-note@-1 {{while checking the satisfaction of concept 'C<GH121278::T, GH121278::S>' requested here}}
+// expected-note@-1 {{while checking the satisfaction of concept 'C<T, S>' requested here}}
#endif
}
diff --git a/clang/test/SemaHLSL/BuiltIns/Buffers.hlsl b/clang/test/SemaHLSL/BuiltIns/Buffers.hlsl
index d7c6876..999372c 100644
--- a/clang/test/SemaHLSL/BuiltIns/Buffers.hlsl
+++ b/clang/test/SemaHLSL/BuiltIns/Buffers.hlsl
@@ -19,7 +19,7 @@ Buffer<double2> r4;
// expected-error@+4 {{constraints not satisfied for class template 'Buffer'}}
// expected-note@*:* {{template declaration from hidden source: template <typename element_type> requires __is_typed_resource_element_compatible<element_type> class Buffer}}
-// expected-note@*:* {{because 'hlsl::Buffer<int>' does not satisfy '__is_typed_resource_element_compatible'}}
+// expected-note@*:* {{because 'Buffer<int>' does not satisfy '__is_typed_resource_element_compatible'}}
// expected-note@*:* {{because '__builtin_hlsl_is_typed_resource_element_compatible(hlsl::Buffer<int>)' evaluated to false}}
Buffer<Buffer<int> > r5;
@@ -65,7 +65,7 @@ Buffer<half[4]> r10;
typedef vector<int, 8> int8;
// expected-error@+3 {{constraints not satisfied for class template 'Buffer'}}
-// expected-note@*:* {{because 'vector<int, 8>' (vector of 8 'int' values) does not satisfy '__is_typed_resource_element_compatible'}}
+// expected-note@*:* {{because 'int8' (aka 'vector<int, 8>') does not satisfy '__is_typed_resource_element_compatible'}}
// expected-note@*:* {{because '__builtin_hlsl_is_typed_resource_element_compatible(vector<int, 8>)' evaluated to false}}
Buffer<int8> r11;
@@ -90,7 +90,7 @@ enum numbers { one, two, three };
Buffer<numbers> r15;
// expected-error@+3 {{constraints not satisfied for class template 'Buffer'}}
-// expected-note@*:* {{because 'vector<double, 3>' (vector of 3 'double' values) does not satisfy '__is_typed_resource_element_compatible'}}
+// expected-note@*:* {{because 'double3' (aka 'vector<double, 3>') does not satisfy '__is_typed_resource_element_compatible'}}
// expected-note@*:* {{because '__builtin_hlsl_is_typed_resource_element_compatible(vector<double, 3>)' evaluated to false}}
Buffer<double3> r16;
diff --git a/clang/test/SemaHLSL/BuiltIns/RWBuffers.hlsl b/clang/test/SemaHLSL/BuiltIns/RWBuffers.hlsl
index 361f4303..b33f2af 100644
--- a/clang/test/SemaHLSL/BuiltIns/RWBuffers.hlsl
+++ b/clang/test/SemaHLSL/BuiltIns/RWBuffers.hlsl
@@ -19,7 +19,7 @@ RWBuffer<double2> r4;
// expected-error@+4 {{constraints not satisfied for class template 'RWBuffer'}}
// expected-note@*:* {{template declaration from hidden source: template <typename element_type> requires __is_typed_resource_element_compatible<element_type> class RWBuffer}}
-// expected-note@*:* {{because 'hlsl::RWBuffer<int>' does not satisfy '__is_typed_resource_element_compatible'}}
+// expected-note@*:* {{because 'RWBuffer<int>' does not satisfy '__is_typed_resource_element_compatible'}}
// expected-note@*:* {{because '__builtin_hlsl_is_typed_resource_element_compatible(hlsl::RWBuffer<int>)' evaluated to false}}
RWBuffer<RWBuffer<int> > r5;
@@ -65,7 +65,7 @@ RWBuffer<half[4]> r10;
typedef vector<int, 8> int8;
// expected-error@+3 {{constraints not satisfied for class template 'RWBuffer'}}
-// expected-note@*:* {{because 'vector<int, 8>' (vector of 8 'int' values) does not satisfy '__is_typed_resource_element_compatible'}}
+// expected-note@*:* {{because 'int8' (aka 'vector<int, 8>') does not satisfy '__is_typed_resource_element_compatible'}}
// expected-note@*:* {{because '__builtin_hlsl_is_typed_resource_element_compatible(vector<int, 8>)' evaluated to false}}
RWBuffer<int8> r11;
@@ -90,7 +90,7 @@ enum numbers { one, two, three };
RWBuffer<numbers> r15;
// expected-error@+3 {{constraints not satisfied for class template 'RWBuffer'}}
-// expected-note@*:* {{because 'vector<double, 3>' (vector of 3 'double' values) does not satisfy '__is_typed_resource_element_compatible'}}
+// expected-note@*:* {{because 'double3' (aka 'vector<double, 3>') does not satisfy '__is_typed_resource_element_compatible'}}
// expected-note@*:* {{because '__builtin_hlsl_is_typed_resource_element_compatible(vector<double, 3>)' evaluated to false}}
RWBuffer<double3> r16;
diff --git a/clang/test/SemaHLSL/RootSignature-err.hlsl b/clang/test/SemaHLSL/RootSignature-err.hlsl
index 89c684c..debeafe 100644
--- a/clang/test/SemaHLSL/RootSignature-err.hlsl
+++ b/clang/test/SemaHLSL/RootSignature-err.hlsl
@@ -191,6 +191,10 @@ void basic_validation_5() {}
[RootSignature("StaticSampler(s0, mipLODBias = 15.990001)")]
void basic_validation_6() {}
+// expected-error@+1 {{invalid value of flags}}
+[RootSignature("StaticSampler(s0, flags = FLAG_TYPO)")]
+void basic_validation_7() {}
+
// expected-error@+1 {{sampler and non-sampler resource mixed in descriptor table}}
[RootSignature("DescriptorTable(Sampler(s0), CBV(b0))")]
void mixed_resource_table() {}
diff --git a/clang/test/SemaHLSL/RootSignature-flags-err.hlsl b/clang/test/SemaHLSL/RootSignature-flags-err.hlsl
index 9449d33..c79e692 100644
--- a/clang/test/SemaHLSL/RootSignature-flags-err.hlsl
+++ b/clang/test/SemaHLSL/RootSignature-flags-err.hlsl
@@ -2,7 +2,8 @@
// RUN: -fdx-rootsignature-version=rootsig_1_0 %s -verify=v10
// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -x hlsl -fsyntax-only \
// RUN: -fdx-rootsignature-version=rootsig_1_1 %s -verify=v11
-
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -x hlsl -fsyntax-only \
+// RUN: -fdx-rootsignature-version=rootsig_1_2 %s -verify=v12
// Root Descriptor Flags:
// v10-error@+1 {{invalid flags for version 1.0}}
@@ -13,8 +14,9 @@ void bad_root_descriptor_flags_0() {}
[RootSignature("CBV(b0, flags = DATA_STATIC_WHILE_SET_AT_EXECUTE)")]
void bad_root_descriptor_flags_1() {}
-// v10-error@+2 {{invalid flags for version 1.0}}
-// v11-error@+1 {{invalid flags for version 1.1}}
+// v10-error@+3 {{invalid flags for version 1.0}}
+// v11-error@+2 {{invalid flags for version 1.1}}
+// v12-error@+1 {{invalid flags for version 1.2}}
[RootSignature("CBV(b0, flags = DATA_STATIC | DATA_VOLATILE)")]
void bad_root_descriptor_flags_2() {}
@@ -40,18 +42,20 @@ void bad_descriptor_range_flags_3() {}
[RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS))")]
void bad_descriptor_range_flags_4() {}
-// v10-error@+2 {{invalid flags for version 1.0}}
-// v11-error@+1 {{invalid flags for version 1.1}}
+// v10-error@+3 {{invalid flags for version 1.0}}
+// v11-error@+2 {{invalid flags for version 1.1}}
+// v12-error@+1 {{invalid flags for version 1.2}}
[RootSignature("DescriptorTable(CBV(b0, flags = DATA_STATIC | DATA_STATIC_WHILE_SET_AT_EXECUTE))")]
void bad_descriptor_range_flags_5() {}
-// v10-error@+2 {{invalid flags for version 1.0}}
-// v11-error@+1 {{invalid flags for version 1.1}}
+// v10-error@+3 {{invalid flags for version 1.0}}
+// v11-error@+2 {{invalid flags for version 1.1}}
+// v12-error@+1 {{invalid flags for version 1.2}}
[RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_VOLATILE | DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS))")]
void bad_descriptor_range_flags_6() {}
-// v10-error@+2 {{invalid flags for version 1.0}}
-// v11-error@+1 {{invalid flags for version 1.1}}
+// v10-error@+3 {{invalid flags for version 1.0}}
+// v11-error@+2 {{invalid flags for version 1.1}}
+// v12-error@+1 {{invalid flags for version 1.2}}
[RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_VOLATILE | DATA_STATIC))")]
void bad_descriptor_range_flags_7() {}
-
diff --git a/clang/test/SemaTemplate/GH161657.cpp b/clang/test/SemaTemplate/GH161657.cpp
new file mode 100644
index 0000000..5ad4dde
--- /dev/null
+++ b/clang/test/SemaTemplate/GH161657.cpp
@@ -0,0 +1,11 @@
+// RUN: %clang_cc1 -triple=x86_64 -fsyntax-only -std=c++20 -ffp-exception-behavior=strict -verify %s
+// expected-no-diagnostics
+
+template <class T> struct S {
+ template <class U> using type1 = decltype([] { return U{}; });
+};
+
+void foo() {
+ using T1 = S<int>::type1<int>;
+ int x = T1()();
+}
diff --git a/clang/test/SemaTemplate/concepts-recovery-expr.cpp b/clang/test/SemaTemplate/concepts-recovery-expr.cpp
index 6bed179..aa4ed53 100644
--- a/clang/test/SemaTemplate/concepts-recovery-expr.cpp
+++ b/clang/test/SemaTemplate/concepts-recovery-expr.cpp
@@ -4,7 +4,7 @@
constexpr bool CausesRecoveryExpr = "test" + 1.0f;
template<typename T>
-concept ReferencesCRE = CausesRecoveryExpr;
+concept ReferencesCRE = CausesRecoveryExpr; // #subst1
template<typename T> requires CausesRecoveryExpr // #NVC1REQ
void NoViableCands1(){} // #NVC1
@@ -19,16 +19,18 @@ void NVCUse() {
NoViableCands1<int>();
// expected-error@-1 {{no matching function for call to 'NoViableCands1'}}
// expected-note@#NVC1{{candidate template ignored: constraints not satisfied}}
+ // expected-note@#NVC2REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
// expected-note@#NVC1REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
NoViableCands2<int>();
// expected-error@-1 {{no matching function for call to 'NoViableCands2'}}
// expected-note@#NVC2{{candidate template ignored: constraints not satisfied}}
- // expected-note@#NVC2REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
NoViableCands3<int>();
// expected-error@-1 {{no matching function for call to 'NoViableCands3'}}
// expected-note@#NVC3{{candidate template ignored: constraints not satisfied}}
- // expected-note@#NVC3REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#NVC3REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
}
template<typename T> requires CausesRecoveryExpr // #OVC1REQ
@@ -58,12 +60,14 @@ void OVCUse() {
// expected-error@-1 {{no matching function for call to 'OtherViableCands2'}}
// expected-note@#OVC2_ALT {{candidate function}}
// expected-note@#OVC2 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#OVC2REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#OVC2REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
OtherViableCands3<int>();
// expected-error@-1 {{no matching function for call to 'OtherViableCands3'}}
// expected-note@#OVC3_ALT {{candidate function}}
// expected-note@#OVC3 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#OVC3REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#OVC3REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
}
template<typename T> requires CausesRecoveryExpr // #OBNVC1REQ
@@ -95,13 +99,15 @@ void OBNVCUse() {
// expected-note@#OBNVC2_ALT {{candidate template ignored: constraints not satisfied}}
// expected-note@#OBNVC2REQ_ALT {{because 'false' evaluated to false}}
// expected-note@#OBNVC2 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#OBNVC2REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#OBNVC2REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
OtherBadNoViableCands3<int>();
// expected-error@-1 {{no matching function for call to 'OtherBadNoViableCands3'}}
// expected-note@#OBNVC3_ALT {{candidate template ignored: constraints not satisfied}}
// expected-note@#OBNVC3REQ_ALT {{because 'false' evaluated to false}}
// expected-note@#OBNVC3 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#OBNVC3REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#OBNVC3REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
}
@@ -136,12 +142,14 @@ void MemOVCUse() {
// expected-error@-1 {{no matching member function for call to 'OtherViableCands2'}}
// expected-note@#MEMOVC2_ALT {{candidate function}}
// expected-note@#MEMOVC2 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#MEMOVC2REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#MEMOVC2REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
S.OtherViableCands3<int>();
// expected-error@-1 {{no matching member function for call to 'OtherViableCands3'}}
// expected-note@#MEMOVC3_ALT {{candidate function}}
// expected-note@#MEMOVC3 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#MEMOVC3REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#MEMOVC3REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
}
struct StaticOVC {
@@ -173,12 +181,14 @@ void StaticMemOVCUse() {
// expected-error@-1 {{no matching function for call to 'OtherViableCands2'}}
// expected-note@#SMEMOVC2_ALT {{candidate function}}
// expected-note@#SMEMOVC2 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#SMEMOVC2REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#SMEMOVC2REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
StaticOVC::OtherViableCands3<int>();
// expected-error@-1 {{no matching function for call to 'OtherViableCands3'}}
// expected-note@#SMEMOVC3_ALT {{candidate function}}
// expected-note@#SMEMOVC3 {{candidate template ignored: constraints not satisfied}}
- // expected-note@#SMEMOVC3REQ{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#SMEMOVC3REQ{{because 'int' does not satisfy 'ReferencesCRE'}}
+ // expected-note@#subst1{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
}
namespace GH58548 {
diff --git a/clang/test/SemaTemplate/concepts-recursive-inst.cpp b/clang/test/SemaTemplate/concepts-recursive-inst.cpp
index 097cad1..73dce93 100644
--- a/clang/test/SemaTemplate/concepts-recursive-inst.cpp
+++ b/clang/test/SemaTemplate/concepts-recursive-inst.cpp
@@ -12,7 +12,7 @@ void g() {
// expected-note@#FDEF{{because 'int' does not satisfy 'c'}}
// expected-note@#CDEF{{because 'f(t)' would be invalid: no matching function for call to 'f'}}
}
-} // namespace GH53213
+} // namespace GH53213
namespace GH45736 {
struct constrained;
@@ -67,15 +67,14 @@ struct my_range{
void baz() {
auto it = begin(rng); // #BEGIN_CALL
-// expected-error@#INF_BEGIN {{satisfaction of constraint 'Inf<Inf auto>' depends on itself}}
-// expected-note@#INF_BEGIN {{while substituting template arguments into constraint expression here}}
+// expected-error-re@#INF_REQ {{satisfaction of constraint {{.*}} depends on itself}}
+// expected-note@#INF_BEGIN {{while checking the satisfaction of concept 'Inf<DirectRecursiveCheck::my_range>' requested here}}
// expected-note@#INF_BEGIN_EXPR {{while checking constraint satisfaction for template 'begin<DirectRecursiveCheck::my_range>' required here}}
// expected-note@#INF_BEGIN_EXPR {{while substituting deduced template arguments into function template 'begin'}}
// expected-note@#INF_BEGIN_EXPR {{in instantiation of requirement here}}
// expected-note@#INF_REQ {{while substituting template arguments into constraint expression here}}
-// expected-note@#INF_BEGIN {{while checking the satisfaction of concept 'Inf<DirectRecursiveCheck::my_range>' requested here}}
-// expected-note@#INF_BEGIN {{while substituting template arguments into constraint expression here}}
-// expected-note@#BEGIN_CALL {{while checking constraint satisfaction for template 'begin<DirectRecursiveCheck::my_range>' required here}}
+// expected-note@#INF_BEGIN {{while checking the satisfaction of concept 'Inf<struct my_range>' requested here}}
+// expected-note@#BEGIN_CALL {{while checking constraint satisfaction for template 'begin<struct my_range>' required here}}
// expected-note@#BEGIN_CALL {{while substituting deduced template arguments into function template}}
// Fallout of the failure is failed lookup, which is necessary to stop odd
@@ -83,6 +82,7 @@ auto it = begin(rng); // #BEGIN_CALL
// expected-error@#BEGIN_CALL {{no matching function for call to 'begin'}}
// expected-note@#NOTINF_BEGIN {{candidate function}}
// expected-note@#INF_BEGIN{{candidate template ignored: constraints not satisfied}}
+// expected-note@#INF_BEGIN{{because 'Inf auto' does not satisfy 'Inf}}
}
} // namespace DirectRecursiveCheck
@@ -100,16 +100,17 @@ namespace GH50891 {
static_assert(Numeric<Deferred>); // #STATIC_ASSERT
// expected-error@#NUMERIC{{satisfaction of constraint 'requires (T a) { foo(a); }' depends on itself}}
// expected-note@#NUMERIC {{while substituting template arguments into constraint expression here}}
- // expected-note@#OP_TO {{while checking the satisfaction of concept 'Numeric<GH50891::Deferred>' requested here}}
- // expected-note@#OP_TO {{while substituting template arguments into constraint expression here}}
- // expected-note@#FOO_CALL {{while checking constraint satisfaction for template}}
- // expected-note@#FOO_CALL {{while substituting deduced template arguments into function template}}
- // expected-note@#FOO_CALL {{in instantiation of requirement here}}
+ // expected-note@#OP_TO {{while checking the satisfaction of concept 'Numeric<Deferred>' requested here}}
+ // expected-note@#OP_TO {{skipping 1 context}}
+ // expected-note@#FOO_CALL 2{{while checking constraint satisfaction for template}}
+ // expected-note@#FOO_CALL 2{{while substituting deduced template arguments into function template}}
+ // expected-note@#FOO_CALL 2{{in instantiation of requirement here}}
// expected-note@#NUMERIC {{while substituting template arguments into constraint expression here}}
// expected-error@#STATIC_ASSERT {{static assertion failed}}
- // expected-note@#STATIC_ASSERT{{while checking the satisfaction of concept 'Numeric<GH50891::Deferred>' requested here}}
- // expected-note@#STATIC_ASSERT{{because substituted constraint expression is ill-formed: constraint depends on a previously diagnosed expression}}
+ // expected-note@#STATIC_ASSERT{{while checking the satisfaction of concept 'Numeric<Deferred>' requested here}}
+ // expected-note@#STATIC_ASSERT{{because 'Deferred' does not satisfy 'Numeric'}}
+ // expected-note@#FOO_CALL{{because 'foo(a)' would be invalid}}
} // namespace GH50891
diff --git a/clang/test/SemaTemplate/concepts.cpp b/clang/test/SemaTemplate/concepts.cpp
index 209e7dc..6d29f8b 100644
--- a/clang/test/SemaTemplate/concepts.cpp
+++ b/clang/test/SemaTemplate/concepts.cpp
@@ -1002,7 +1002,7 @@ template<class>
concept Irrelevant = false;
template <typename T>
-concept ErrorRequires = requires(ErrorRequires auto x) { x; };
+concept ErrorRequires = requires(ErrorRequires auto x) { x; }; //#GH54678-ill-formed-concept
// expected-error@-1 {{a concept definition cannot refer to itself}} \
// expected-error@-1 {{'auto' not allowed in requires expression parameter}} \
// expected-note@-1 {{declared here}}
@@ -1023,8 +1023,7 @@ template<class T> void eee(T t) // expected-note {{candidate template ignored: c
requires (Irrelevant<T> || Irrelevant<T> || True<T>) && False<T> {} // expected-note {{'long' does not satisfy 'False'}}
template<class T> void fff(T t) // expected-note {{candidate template ignored: constraints not satisfied}}
-requires((ErrorRequires<T> || False<T> || True<T>) && False<T>) {} // expected-note {{'unsigned long' does not satisfy 'False'}}
-
+requires((ErrorRequires<T> || False<T> || True<T>) && False<T>) {} // expected-note {{because 'unsigned long' does not satisfy 'False'}}
void test() {
aaa(42); // expected-error {{no matching function}}
bbb(42L); // expected-error{{no matching function}}
@@ -1264,12 +1263,7 @@ C auto x = 0;
// expected-error@#T_Type {{type 'int' cannot be used prior to '::'}} \
// expected-note@-1 {{in instantiation of default argument}}
-// This will be fixed when we merge https://github.com/llvm/llvm-project/pull/141776
-// Which makes us behave like GCC.
static_assert(f(0));
-// expected-error@-1 {{no matching function for call}} \
-// expected-note@#GH61824_f {{constraints not satisfied}} \
-// expected-note@#T_Type {{type 'int' cannot be used prior to '::'}}
}
@@ -1278,4 +1272,65 @@ template <typename T> concept PerfectSquare = [](){} // expected-note 2{{here}}
([](auto) { return true; }) < PerfectSquare <class T>;
// expected-error@-1 {{declaration of 'T' shadows template parameter}} \
// expected-error@-1 {{a concept definition cannot refer to itself}}
+
+}
+namespace GH61811 {
+template <class T> struct A { static const int x = 42; };
+template <class Ta> concept A42 = A<Ta>::x == 42;
+template <class Tv> concept Void = __is_same_as(Tv, void);
+template <class Tb, class Ub> concept A42b = Void<Tb> || A42<Ub>;
+template <class Tc> concept R42c = A42b<Tc, Tc&>;
+static_assert (R42c<void>);
+}
+
+namespace parameter_mapping_regressions {
+
+namespace case1 {
+
+template <template <class> class> using __meval = struct __q;
+template <template <class> class _Tp>
+concept __mvalid = requires { typename __meval<_Tp>; };
+template <class _Fn>
+concept __minvocable = __mvalid<_Fn::template __f>;
+template <class...> struct __mdefer_;
+template <class _Fn, class... _Args>
+ requires __minvocable<_Fn>
+struct __mdefer_<_Fn, _Args...> {};
+template <class = __q> struct __mtransform {
+ template <class> using __f = int;
+};
+struct __completion_domain_or_none_ : __mdefer_<__mtransform<>> {};
+
+}
+
+namespace case2 {
+
+template<auto& Q, class P> concept C = Q.template operator()<P>();
+template<class P> concept E = C<[]<class Ty>{ return false; }, P>;
+static_assert(!E<int>);
+
+}
+
+
+namespace case3 {
+template <class> constexpr bool is_move_constructible_v = false;
+
+template <class _Tp>
+concept __cpp17_move_constructible = is_move_constructible_v<_Tp>; // #is_move_constructible_v
+
+template <class _Tp>
+concept __cpp17_copy_constructible = __cpp17_move_constructible<_Tp>; // #__cpp17_move_constructible
+
+template <class _Iter>
+concept __cpp17_iterator = __cpp17_copy_constructible<_Iter>; // #__cpp17_copy_constructible
+
+struct not_move_constructible {};
+static_assert(__cpp17_iterator<not_move_constructible>); \
+// expected-error {{static assertion failed}} \
+// expected-note {{because 'not_move_constructible' does not satisfy '__cpp17_iterator'}} \
+// expected-note@#__cpp17_copy_constructible {{because 'not_move_constructible' does not satisfy '__cpp17_copy_constructible'}} \
+// expected-note@#__cpp17_move_constructible {{because 'parameter_mapping_regressions::case3::not_move_constructible' does not satisfy '__cpp17_move_constructible'}} \
+// expected-note@#is_move_constructible_v {{because 'is_move_constructible_v<parameter_mapping_regressions::case3::not_move_constructible>' evaluated to false}}
+}
+
}
diff --git a/clang/test/SemaTemplate/deduction-guide.cpp b/clang/test/SemaTemplate/deduction-guide.cpp
index e2b586e..9e5756f 100644
--- a/clang/test/SemaTemplate/deduction-guide.cpp
+++ b/clang/test/SemaTemplate/deduction-guide.cpp
@@ -574,8 +574,9 @@ static_assert(x.size == 4);
// CHECK-NEXT: | |-ParmVarDecl 0x{{.+}} <col:18, col:24> col:21 'U (&)[3]'
// CHECK-NEXT: | `-ConceptSpecializationExpr 0x{{.+}} <col:36, col:42> 'bool' Concept 0x{{.+}} 'True'
// CHECK-NEXT: | |-ImplicitConceptSpecializationDecl 0x{{.+}} <{{.+}}> col:28
-// CHECK-NEXT: | | `-TemplateArgument type 'type-parameter-0-0'
-// CHECK-NEXT: | | `-TemplateTypeParmType 0x{{.+}} 'type-parameter-0-0' dependent depth 0 index 0
+// CHECK-NEXT: | | `-TemplateArgument type 'T'
+// CHECK-NEXT: | | `-TemplateTypeParmType 0x{{.+}} 'T' dependent depth 0 index 0
+// CHECK-NEXT: | | `-TemplateTypeParm 0x{{.+}} 'T'
// CHECK-NEXT: | `-TemplateArgument <{{.+}}> type 'T':'type-parameter-0-0'
// CHECK-NEXT: | `-TemplateTypeParmType 0x{{.+}} 'T' dependent depth 0 index 0
// CHECK-NEXT: | `-TemplateTypeParm 0x{{.+}} 'T'
@@ -588,8 +589,9 @@ static_assert(x.size == 4);
// CHECK-NEXT: |-ParmVarDecl 0x{{.+}} <col:18, col:24> col:21 'double (&)[3]'
// CHECK-NEXT: `-ConceptSpecializationExpr 0x{{.+}} <col:36, col:42> 'bool' Concept 0x{{.+}} 'True'
// CHECK-NEXT: |-ImplicitConceptSpecializationDecl 0x{{.+}} <{{.+}}> col:28
-// CHECK-NEXT: | `-TemplateArgument type 'type-parameter-0-0'
-// CHECK-NEXT: | `-TemplateTypeParmType 0x{{.+}} 'type-parameter-0-0' dependent depth 0 index 0
+// CHECK-NEXT: | `-TemplateArgument type 'T'
+// CHECK-NEXT: | `-TemplateTypeParmType 0x{{.+}} 'T' dependent depth 0 index 0
+// CHECK-NEXT: | `-TemplateTypeParm 0x{{.+}} 'T'
// CHECK-NEXT: `-TemplateArgument <{{.+}}> type 'T':'type-parameter-0-0'
// CHECK-NEXT: `-TemplateTypeParmType 0x{{.+}} 'T' dependent depth 0 index 0
// CHECK-NEXT: `-TemplateTypeParm 0x{{.+}} 'T'
@@ -660,8 +662,9 @@ Test test(42);
// CHECK-NEXT: |-TemplateTypeParmDecl {{.*}} Concept {{.*}} 'Constraint' depth 0 index 1 auto:1
// CHECK-NEXT: | `-ConceptSpecializationExpr {{.*}} 'bool' Concept {{.*}} 'Constraint'
// CHECK-NEXT: | |-ImplicitConceptSpecializationDecl {{.*}}
-// CHECK-NEXT: | | |-TemplateArgument type 'type-parameter-0-1'
-// CHECK-NEXT: | | | `-TemplateTypeParmType {{.*}} 'type-parameter-0-1' dependent depth 0 index 1
+// CHECK-NEXT: | | |-TemplateArgument type 'auto:1'
+// CHECK-NEXT: | | | `-TemplateTypeParmType {{.*}} 'auto:1' dependent depth 0 index 1
+// CHECK-NEXT: | | | `-TemplateTypeParm {{.*}} 'auto:1'
// CHECK-NEXT: | | `-TemplateArgument type 'int'
// CHECK-NEXT: | | `-BuiltinType {{.*}} 'int'
// CHECK-NEXT: | |-TemplateArgument {{.*}} type 'auto:1':'type-parameter-0-1'
diff --git a/clang/test/SemaTemplate/instantiate-abbreviated-template.cpp b/clang/test/SemaTemplate/instantiate-abbreviated-template.cpp
index 1f2171a..e03756e 100644
--- a/clang/test/SemaTemplate/instantiate-abbreviated-template.cpp
+++ b/clang/test/SemaTemplate/instantiate-abbreviated-template.cpp
@@ -1,5 +1,6 @@
// RUN: %clang_cc1 -std=c++2a -x c++ %s -verify
+
template<typename...>
concept C = false; // expected-note 9{{because}}
diff --git a/clang/test/SemaTemplate/instantiate-expanded-type-constraint.cpp b/clang/test/SemaTemplate/instantiate-expanded-type-constraint.cpp
index 3edf243..de4a484 100644
--- a/clang/test/SemaTemplate/instantiate-expanded-type-constraint.cpp
+++ b/clang/test/SemaTemplate/instantiate-expanded-type-constraint.cpp
@@ -7,8 +7,7 @@ template<typename T>
constexpr bool is_same_v<T, T> = true;
template<typename T, typename U>
-concept same_as = is_same_v<T, U>;
-// expected-note@-1{{because 'is_same_v<int, bool>' evaluated to false}}
+concept same_as = is_same_v<T, U>; //#is_same_v
template<typename T, typename... Us>
concept either = (is_same_v<T, Us> || ...);
@@ -17,6 +16,7 @@ template<typename... Ts>
struct T {
template<same_as<Ts>... Us>
// expected-note@-1{{because 'same_as<int, bool>' evaluated to false}}
+ // expected-note@#is_same_v{{because 'is_same_v<int, bool>' evaluated to false}}
static void foo(Us... u, int x) { };
// expected-note@-1{{candidate template ignored: deduced too few arguments}}
// expected-note@-2{{candidate template ignored: constraints not satisfied}}
diff --git a/clang/test/SemaTemplate/instantiate-requires-expr.cpp b/clang/test/SemaTemplate/instantiate-requires-expr.cpp
index e60f792..32ad537 100644
--- a/clang/test/SemaTemplate/instantiate-requires-expr.cpp
+++ b/clang/test/SemaTemplate/instantiate-requires-expr.cpp
@@ -72,12 +72,12 @@ namespace type_requirement {
template<typename T> requires
false_v<requires { typename T::template temp<T>; }>
- // expected-note@-1 {{because 'false_v<requires { typename type_requirement::contains_template<int>::template temp<type_requirement::contains_template<int>>; }>' evaluated to false}}
- // expected-note@-2 {{because 'false_v<requires { typename type_requirement::contains_template<short>::template temp<type_requirement::contains_template<short>>; }>' evaluated to false}}
+ // expected-note@-1 {{because 'false_v<requires { typename contains_template<int>::template temp<contains_template<int>>; }>' evaluated to false}}
+ // expected-note@-2 {{because 'false_v<requires { typename contains_template<short>::template temp<contains_template<short>>; }>' evaluated to false}}
struct r2 {};
- using r2i1 = r2<contains_template<int>>; // expected-error{{constraints not satisfied for class template 'r2' [with T = type_requirement::contains_template<int>]}}
- using r2i2 = r2<contains_template<short>>; // expected-error{{constraints not satisfied for class template 'r2' [with T = type_requirement::contains_template<short>]}}
+ using r2i1 = r2<contains_template<int>>; // expected-error{{constraints not satisfied for class template 'r2' [with T = contains_template<int>]}}
+ using r2i2 = r2<contains_template<short>>; // expected-error{{constraints not satisfied for class template 'r2' [with T = contains_template<short>]}}
// substitution error occurs, then requires expr is instantiated again
@@ -108,7 +108,7 @@ namespace type_requirement {
// expected-note@-1 {{because 'false_v<requires { <<error-type>>; } && requires { <<error-type>>; }>' evaluated to false}}
struct r7 {};
- using r7i = r7<int, A>; // expected-error{{constraints not satisfied for class template 'r7' [with Ts = <int, type_requirement::A>]}}
+ using r7i = r7<int, A>; // expected-error{{constraints not satisfied for class template 'r7' [with Ts = <int, A>]}}
}
namespace expr_requirement {
@@ -268,3 +268,13 @@ struct Foo {
};
} // namespace GH110785
+
+namespace sugared_instantiation {
+ template <class C1> concept C = requires { C1{}; };
+ template <class D1> concept D = requires { new D1; };
+
+ // Test that 'deduced auto' doesn't get confused with 'undeduced auto'.
+ auto f() { return 0; }
+ static_assert(requires { { f() } -> C; });
+ static_assert(requires { { f() } -> D; });
+} // namespace sugared_instantiation
diff --git a/clang/test/SemaTemplate/instantiate-template-argument.cpp b/clang/test/SemaTemplate/instantiate-template-argument.cpp
index 43d5d00..7606619 100644
--- a/clang/test/SemaTemplate/instantiate-template-argument.cpp
+++ b/clang/test/SemaTemplate/instantiate-template-argument.cpp
@@ -1,4 +1,6 @@
-// RUN: %clang_cc1 -std=c++2a -x c++ %s -verify
+// RUN: %clang_cc1 -std=c++2a -x c++ %s -verify=expected,cxx20
+// RUN: %clang_cc1 -std=c++2c -x c++ %s -verify
+
template<auto T, decltype(T) U>
concept C1 = sizeof(U) >= 4;
@@ -9,20 +11,101 @@ concept C2 = C1<Y{}, V>;
// sizeof(U) >= 4 [U = V (decltype(Y{}))]
template<char W>
-constexpr int foo() requires C2<int, W> { return 1; }
+constexpr int foo() requires C2<int, W> { return 1; } // #cand1
// sizeof(U) >= 4 [U = W (decltype(int{}))]
template<char X>
-// expected-note@+1{{candidate function}}
-constexpr int foo() requires C1<1, X> && true { return 2; }
+constexpr int foo() requires C1<1, X> && true { return 2; } // #cand2
// sizeof(U) >= 4 [U = X (decltype(1))]
static_assert(foo<'a'>() == 2);
+
template<char Z>
-// expected-note@+1{{candidate function}}
-constexpr int foo() requires C2<long long, Z> && true { return 3; }
+constexpr int foo() requires C2<long long, Z> && true { return 3; } // #cand3
// sizeof(U) >= 4 [U = Z (decltype(long long{}))]
static_assert(foo<'a'>() == 3);
-// expected-error@-1{{call to 'foo' is ambiguous}}
\ No newline at end of file
+// expected-error@-1{{call to 'foo' is ambiguous}}
+// expected-note@#cand2 {{candidate function}}
+// expected-note@#cand3 {{candidate function}}
+
+
+namespace case1 {
+
+template<auto T, decltype(T) U>
+concept C1 = sizeof(T) >= 4; // #case1_C1
+
+template<typename Y, char V>
+concept C2 = C1<Y{}, V>; // #case1_C2
+
+template<class T, char W>
+constexpr int foo() requires C2<T, W> { return 1; } // #case1_foo1
+
+template<class T, char X>
+constexpr int foo() requires C1<T{}, X> && true { return 2; } // #case1_foo2
+
+static_assert(foo<char, 'a'>() == 2);
+// expected-error@-1{{no matching function for call to 'foo'}}
+// expected-note@#case1_foo1{{candidate template ignored: constraints not satisfied [with T = char, W = 'a']}}
+// expected-note@#case1_foo1{{because 'C2<char, 'a'>' evaluated to false}}
+// expected-note@#case1_C2{{because 'C1<char{}, 'a'>' evaluated to false}}
+// expected-note@#case1_C1{{because 'sizeof ('\x00') >= 4' (1 >= 4) evaluated to false}}
+// expected-note@#case1_foo2{{candidate template ignored: constraints not satisfied [with T = char, X = 'a']}}
+// expected-note@#case1_foo2{{because 'C1<char{}, 'a'>' evaluated to false}}
+// expected-note@#case1_C1{{because 'sizeof ('\x00') >= 4' (1 >= 4) evaluated to false}}
+
+static_assert(foo<int, 'a'>() == 2);
+
+}
+
+namespace packs {
+
+template<auto T, decltype(T) U>
+concept C1 = sizeof(U) >= 4;
+
+template<typename Y, char V>
+concept C2 = C1<Y{}, V>;
+
+template<char... W>
+constexpr int foo() requires (C2<int, W> && ...) { return 1; } // #packs-cand1
+
+template<char... X>
+constexpr int foo() requires (C1<1, X> && ...) && true { return 2; } // #packs-cand2
+
+static_assert(foo<'a'>() == 2);
+// cxx20-error@-1{{call to 'foo' is ambiguous}}
+// cxx20-note@#packs-cand1 {{candidate function}}
+// cxx20-note@#packs-cand2 {{candidate function}}
+
+}
+
+namespace case2 {
+template<auto T> concept C1 = sizeof(decltype(T)) >= 0;
+template<typename Y> concept C2 = C1<Y{}>;
+
+template<char W>
+constexpr int foo() requires C2<int> { return 1; }
+
+template<char X>
+constexpr int foo() requires C1<0> && true { return 2; }
+
+static_assert(foo<0>() == 2);
+}
+
+namespace case3 {
+template<auto T> concept C1 = sizeof(decltype(T)) >= 0;
+
+template<typename Y> concept C2 = C1<Y{}>;
+
+template<char W>
+constexpr int foo() requires C2<int> { return 1; } // #case3_foo1
+
+template<char X>
+constexpr int foo() requires C1<1> && true { return 2; } // #case3_foo2
+
+static_assert(foo<0>() == 2);
+// expected-error@-1{{call to 'foo' is ambiguous}}
+// expected-note@#case3_foo1 {{candidate function}}
+// expected-note@#case3_foo2 {{candidate function}}
+}
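An aside on why the overloads above tie or win: partial ordering compares requires-clauses by subsumption, and an atomic constraint only subsumes an identical atom originating from the same concept. A minimal standalone sketch of both outcomes (illustrative only, not part of this patch; assumes C++20):

template <class T> concept A = true;
template <class T> concept B = true;

template <class T> constexpr int pick() requires A<T> { return 1; }
template <class T> constexpr int pick() requires A<T> && B<T> { return 2; } // subsumes A<T>

static_assert(pick<int>() == 2); // the more-constrained overload wins

template <class T> constexpr int tie() requires A<T> { return 1; }
template <class T> constexpr int tie() requires B<T> { return 2; }
// tie<int>() would be ambiguous: A<T> and B<T> are distinct atoms, so neither subsumes.

This is why candidates whose clauses name different concepts stay ambiguous even though both constraints are satisfied.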
diff --git a/clang/test/SemaTemplate/pr52970.cpp b/clang/test/SemaTemplate/pr52970.cpp
index 7aac5ee..6aabc41 100644
--- a/clang/test/SemaTemplate/pr52970.cpp
+++ b/clang/test/SemaTemplate/pr52970.cpp
@@ -53,7 +53,7 @@ static_assert(!DotFollowingPointer::f(Bad{}), "");
#if __cplusplus >= 202002L
template <class T>
concept C = requires(T t) { t.begin(); };
- // cxx20-note@-1 {{because 't.begin()' would be invalid: member reference type 'Holder<Incomplete> *' is a pointer}}
+ // cxx20-note@-1 {{because 't.begin()' would be invalid: member reference type 'Bad' (aka 'Holder<Incomplete> *') is a pointer}}
static_assert(C<Good>);
static_assert(!C<Bad>);
diff --git a/clang/tools/clang-import-test/clang-import-test.cpp b/clang/tools/clang-import-test/clang-import-test.cpp
index 910e08c..977cec1 100644
--- a/clang/tools/clang-import-test/clang-import-test.cpp
+++ b/clang/tools/clang-import-test/clang-import-test.cpp
@@ -216,7 +216,7 @@ std::unique_ptr<CompilerInstance> BuildCompilerInstance() {
Ins->getTarget().adjust(Ins->getDiagnostics(), Ins->getLangOpts(),
/*AuxTarget=*/nullptr);
Ins->createFileManager();
- Ins->createSourceManager(Ins->getFileManager());
+ Ins->createSourceManager();
Ins->createPreprocessor(TU_Complete);
return Ins;
diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
index be658aca..1419b8c 100644
--- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
+++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
@@ -608,10 +608,10 @@ Expected<StringRef> linkDevice(ArrayRef<StringRef> InputFiles,
Error containerizeRawImage(std::unique_ptr<MemoryBuffer> &Img, OffloadKind Kind,
const ArgList &Args) {
llvm::Triple Triple(Args.getLastArgValue(OPT_triple_EQ));
- if (Kind != OFK_OpenMP || !Triple.isSPIRV() ||
- Triple.getVendor() != llvm::Triple::Intel)
- return Error::success();
- return offloading::intel::containerizeOpenMPSPIRVImage(Img);
+ if (Kind == OFK_OpenMP && Triple.isSPIRV() &&
+ Triple.getVendor() == llvm::Triple::Intel)
+ return offloading::intel::containerizeOpenMPSPIRVImage(Img);
+ return Error::success();
}
Expected<StringRef> writeOffloadFile(const OffloadFile &File) {
diff --git a/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp b/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp
index 8dd993f..de20e74 100644
--- a/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp
+++ b/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp
@@ -27,22 +27,16 @@
#include "llvm/LTO/LTO.h"
#include "llvm/Linker/Linker.h"
#include "llvm/MC/TargetRegistry.h"
-#include "llvm/Object/Archive.h"
-#include "llvm/Object/ArchiveWriter.h"
#include "llvm/Object/Binary.h"
-#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/IRObjectFile.h"
-#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/OffloadBinary.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
-#include "llvm/Remarks/HotnessThresholdParser.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/InitLLVM.h"
-#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Signals.h"
@@ -468,9 +462,10 @@ static Error runAOTCompile(StringRef InputFile, StringRef OutputFile,
// TODO: Consider using LLVM-IR metadata to identify globals of interest
bool isKernel(const Function &F) {
- const CallingConv::ID CC = F.getCallingConv();
- return CC == CallingConv::SPIR_KERNEL || CC == CallingConv::AMDGPU_KERNEL ||
- CC == CallingConv::PTX_Kernel;
+ const llvm::CallingConv::ID CC = F.getCallingConv();
+ return CC == llvm::CallingConv::SPIR_KERNEL ||
+ CC == llvm::CallingConv::AMDGPU_KERNEL ||
+ CC == llvm::CallingConv::PTX_Kernel;
}
/// Performs the following steps:
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index 30e2be7..c39f337 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -2832,10 +2832,8 @@ void OpenACCClauseEnqueue::VisitTileClause(const OpenACCTileClause &C) {
void OpenACCClauseEnqueue::VisitPrivateClause(const OpenACCPrivateClause &C) {
VisitVarList(C);
- for (const OpenACCPrivateRecipe &R : C.getInitRecipes()) {
+ for (const OpenACCPrivateRecipe &R : C.getInitRecipes())
Visitor.AddDecl(R.AllocaDecl);
- Visitor.AddStmt(R.InitExpr);
- }
}
void OpenACCClauseEnqueue::VisitHostClause(const OpenACCHostClause &C) {
@@ -2851,7 +2849,6 @@ void OpenACCClauseEnqueue::VisitFirstPrivateClause(
VisitVarList(C);
for (const OpenACCFirstPrivateRecipe &R : C.getInitRecipes()) {
Visitor.AddDecl(R.AllocaDecl);
- Visitor.AddStmt(R.InitExpr);
Visitor.AddDecl(R.InitFromTemporary);
}
}
@@ -2927,10 +2924,8 @@ void OpenACCClauseEnqueue::VisitDeviceTypeClause(
void OpenACCClauseEnqueue::VisitReductionClause(
const OpenACCReductionClause &C) {
VisitVarList(C);
- for (const OpenACCReductionRecipe &R : C.getRecipes()) {
+ for (const OpenACCReductionRecipe &R : C.getRecipes())
Visitor.AddDecl(R.AllocaDecl);
- Visitor.AddStmt(R.InitExpr);
- }
}
void OpenACCClauseEnqueue::VisitAutoClause(const OpenACCAutoClause &C) {}
void OpenACCClauseEnqueue::VisitIndependentClause(
diff --git a/clang/unittests/Analysis/CFGTest.cpp b/clang/unittests/Analysis/CFGTest.cpp
index 46a6751..6aa09a8 100644
--- a/clang/unittests/Analysis/CFGTest.cpp
+++ b/clang/unittests/Analysis/CFGTest.cpp
@@ -93,6 +93,159 @@ TEST(CFG, DependantBaseAddImplicitDtors) {
.getStatus());
}
+TEST(CFG, SwitchCoveredEnumNoDefault) {
+ const char *Code = R"(
+ enum class E {E1, E2};
+ int f(E e) {
+ switch(e) {
+ case E::E1:
+ return 1;
+ case E::E2:
+ return 2;
+ }
+ return 0;
+ }
+ )";
+ CFG::BuildOptions Options;
+ Options.AssumeReachableDefaultInSwitchStatements = true;
+ BuildResult B = BuildCFG(Code, Options);
+ ASSERT_EQ(BuildResult::BuiltCFG, B.getStatus());
+
+ // [B5 (ENTRY)]
+ // Succs (1): B2
+ //
+ // [B1]
+ // 1: 0
+ // 2: return [B1.1];
+ // Preds (1): B2
+ // Succs (1): B0
+ //
+ // [B2]
+ // 1: e (ImplicitCastExpr, LValueToRValue, E)
+ // T: switch [B2.1]
+ // Preds (1): B5
+ // Succs (3): B3 B4 B1
+ //
+ // [B3]
+ // case E::E2:
+ // 1: 2
+ // 2: return [B3.1];
+ // Preds (1): B2
+ // Succs (1): B0
+ //
+ // [B4]
+ // case E::E1:
+ // 1: 1
+ // 2: return [B4.1];
+ // Preds (1): B2
+ // Succs (1): B0
+ //
+ // [B0 (EXIT)]
+ // Preds (3): B1 B3 B4
+
+ auto *CFG = B.getCFG();
+ const auto &Entry = CFG->getEntry();
+ ASSERT_EQ(1u, Entry.succ_size());
+ // First successor of Entry is the switch
+ CFGBlock *SwitchBlock = *Entry.succ_begin();
+ ASSERT_EQ(3u, SwitchBlock->succ_size());
+ // Last successor of the switch is after the switch
+ auto NoCaseSucc = SwitchBlock->succ_rbegin();
+ EXPECT_TRUE(NoCaseSucc->isReachable());
+
+ // Checking that the same node is Unreachable without this setting
+ Options.AssumeReachableDefaultInSwitchStatements = false;
+ B = BuildCFG(Code, Options);
+ ASSERT_EQ(BuildResult::BuiltCFG, B.getStatus());
+
+ const auto &Entry2 = B.getCFG()->getEntry();
+ ASSERT_EQ(1u, Entry2.succ_size());
+ CFGBlock *SwitchBlock2 = *Entry2.succ_begin();
+ ASSERT_EQ(3u, SwitchBlock2->succ_size());
+ auto NoCaseSucc2 = SwitchBlock2->succ_rbegin();
+ EXPECT_FALSE(NoCaseSucc2->isReachable());
+}
+
+TEST(CFG, SwitchCoveredEnumWithDefault) {
+ const char *Code = R"(
+ enum class E {E1, E2};
+ int f(E e) {
+ switch(e) {
+ case E::E1:
+ return 1;
+ case E::E2:
+ return 2;
+ default:
+ return 0;
+ }
+ return -1;
+ }
+ )";
+ CFG::BuildOptions Options;
+ Options.AssumeReachableDefaultInSwitchStatements = true;
+ BuildResult B = BuildCFG(Code, Options);
+ ASSERT_EQ(BuildResult::BuiltCFG, B.getStatus());
+
+ // [B6 (ENTRY)]
+ // Succs (1): B2
+ //
+ // [B1]
+ // 1: -1
+ // 2: return [B1.1];
+ // Succs (1): B0
+ //
+ // [B2]
+ // 1: e (ImplicitCastExpr, LValueToRValue, E)
+ // T: switch [B2.1]
+ // Preds (1): B6
+ // Succs (3): B4 B5 B3
+ //
+ // [B3]
+ // default:
+ // 1: 0
+ // 2: return [B3.1];
+ // Preds (1): B2
+ // Succs (1): B0
+ //
+ // [B4]
+ // case E::E2:
+ // 1: 2
+ // 2: return [B4.1];
+ // Preds (1): B2
+ // Succs (1): B0
+ //
+ // [B5]
+ // case E::E1:
+ // 1: 1
+ // 2: return [B5.1];
+ // Preds (1): B2
+ // Succs (1): B0
+ //
+ // [B0 (EXIT)]
+ // Preds (4): B1 B3 B4 B5
+
+ const auto &Entry = B.getCFG()->getEntry();
+ ASSERT_EQ(1u, Entry.succ_size());
+ // First successor of Entry is the switch
+ CFGBlock *SwitchBlock = *Entry.succ_begin();
+ ASSERT_EQ(3u, SwitchBlock->succ_size());
+ // Last successor of the switch is the default branch
+ auto defaultBlock = SwitchBlock->succ_rbegin();
+ EXPECT_TRUE(defaultBlock->isReachable());
+
+ // Checking that the same node is Unreachable without this setting
+ Options.AssumeReachableDefaultInSwitchStatements = false;
+ B = BuildCFG(Code, Options);
+ ASSERT_EQ(BuildResult::BuiltCFG, B.getStatus());
+
+ const auto &Entry2 = B.getCFG()->getEntry();
+ ASSERT_EQ(1u, Entry2.succ_size());
+ CFGBlock *SwitchBlock2 = *Entry2.succ_begin();
+ ASSERT_EQ(3u, SwitchBlock2->succ_size());
+ auto defaultBlock2 = SwitchBlock2->succ_rbegin();
+ EXPECT_FALSE(defaultBlock2->isReachable());
+}
+
TEST(CFG, IsLinear) {
auto expectLinear = [](bool IsLinear, const char *Code) {
BuildResult B = BuildCFG(Code);
diff --git a/clang/unittests/CodeGen/TestCompiler.h b/clang/unittests/CodeGen/TestCompiler.h
index 57b5b07..9bd9060 100644
--- a/clang/unittests/CodeGen/TestCompiler.h
+++ b/clang/unittests/CodeGen/TestCompiler.h
@@ -52,7 +52,7 @@ struct TestCompiler {
PtrSize = TInfo.getPointerWidth(clang::LangAS::Default) / 8;
compiler.createFileManager();
- compiler.createSourceManager(compiler.getFileManager());
+ compiler.createSourceManager();
compiler.createPreprocessor(clang::TU_Prefix);
compiler.createASTContext();
diff --git a/clang/unittests/Frontend/CompilerInstanceTest.cpp b/clang/unittests/Frontend/CompilerInstanceTest.cpp
index 36cac5a..cd3fefa 100644
--- a/clang/unittests/Frontend/CompilerInstanceTest.cpp
+++ b/clang/unittests/Frontend/CompilerInstanceTest.cpp
@@ -33,7 +33,7 @@ TEST(CompilerInstance, DefaultVFSOverlayFromInvocation) {
SmallString<256> CurrentPath;
sys::fs::current_path(CurrentPath);
- sys::fs::make_absolute(CurrentPath, FileName);
+ sys::path::make_absolute(CurrentPath, FileName);
// Mount the VFS file itself on the path 'virtual.file'. Makes this test
// a bit shorter than creating a new dummy file just for this purpose.
diff --git a/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp b/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp
index 01f8d4f..82f1968 100644
--- a/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp
+++ b/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp
@@ -226,6 +226,9 @@ TEST_F(LexHLSLRootSignatureTest, ValidLexAllTokensTest) {
STATIC_BORDER_COLOR_OPAQUE_WHITE
STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT
STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT
+
+ UINT_BORDER_COLOR
+ NON_NORMALIZED_COORDINATES
)cc";
hlsl::RootSignatureLexer Lexer(Source);
diff --git a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
index 9b9f5dd..f7e9d2d 100644
--- a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
+++ b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
@@ -263,7 +263,8 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseStaticSamplerTest) {
filter = FILTER_MAXIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT,
maxLOD = 9000, addressU = TEXTURE_ADDRESS_MIRROR,
comparisonFunc = COMPARISON_NOT_EQUAL,
- borderColor = STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT
+ borderColor = STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT,
+ flags = 0
)
)cc";
@@ -336,6 +337,37 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseStaticSamplerTest) {
ASSERT_TRUE(Consumer->isSatisfied());
}
+TEST_F(ParseHLSLRootSignatureTest, ValidStaticSamplerFlagsTest) {
+ const llvm::StringLiteral Source = R"cc(
+ StaticSampler(s0, flags = UINT_BORDER_COLOR | NON_NORMALIZED_COORDINATES)
+ )cc";
+
+ auto Ctx = createMinimalASTContext();
+ StringLiteral *Signature = wrapSource(Ctx, Source);
+
+ TrivialModuleLoader ModLoader;
+ auto PP = createPP(Source, ModLoader);
+
+ hlsl::RootSignatureParser Parser(RootSignatureVersion::V1_1, Signature, *PP);
+
+ // Test no diagnostics produced
+ Consumer->setNoDiag();
+
+ ASSERT_FALSE(Parser.parse());
+
+ auto Elements = Parser.getElements();
+ ASSERT_EQ(Elements.size(), 1u);
+
+ RootElement Elem = Elements[0].getElement();
+ ASSERT_TRUE(std::holds_alternative<StaticSampler>(Elem));
+ auto ValidStaticSamplerFlags =
+ llvm::dxbc::StaticSamplerFlags::NonNormalizedCoordinates |
+ llvm::dxbc::StaticSamplerFlags::UintBorderColor;
+ ASSERT_EQ(std::get<StaticSampler>(Elem).Flags, ValidStaticSamplerFlags);
+
+ ASSERT_TRUE(Consumer->isSatisfied());
+}
+
TEST_F(ParseHLSLRootSignatureTest, ValidParseFloatsTest) {
const llvm::StringLiteral Source = R"cc(
StaticSampler(s0, mipLODBias = 0),
diff --git a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp
index 24e2fd6..edf33ae 100644
--- a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp
+++ b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp
@@ -122,8 +122,8 @@ export int aa = 43;
Clang.setDiagnostics(Diags);
Clang.createVirtualFileSystem(CIOpts.VFS);
- FileManager *FM = Clang.createFileManager();
- Clang.createSourceManager(*FM);
+ Clang.createFileManager();
+ Clang.createSourceManager();
EXPECT_TRUE(Clang.createTarget());
Clang.createPreprocessor(TU_Complete);
diff --git a/clang/unittests/StaticAnalyzer/CallEventTest.cpp b/clang/unittests/StaticAnalyzer/CallEventTest.cpp
index 8b5289e..f426892 100644
--- a/clang/unittests/StaticAnalyzer/CallEventTest.cpp
+++ b/clang/unittests/StaticAnalyzer/CallEventTest.cpp
@@ -84,6 +84,47 @@ TEST(CXXDeallocatorCall, SimpleDestructor) {
#endif
}
+TEST(PrivateMethodCache, NeverReturnDanglingPointersWithMultipleASTs) {
+ // Each iteration will load and unload an AST multiple times. Since the code
+ // is always the same, we increase the chance of hitting a bug in the private
+ // method cache, returning a dangling pointer and crashing the process. If the
+ // cache is properly cleared between runs, the test should pass.
+ for (int I = 0; I < 100; ++I) {
+ auto const *Code = R"(
+ typedef __typeof(sizeof(int)) size_t;
+
+ extern void *malloc(size_t size);
+ extern void *memcpy(void *dest, const void *src, size_t n);
+
+ @interface SomeMoreData {
+ char const* _buffer;
+ int _size;
+ }
+ @property(nonatomic, readonly) const char* buffer;
+ @property(nonatomic) int size;
+
+ - (void)appendData:(SomeMoreData*)other;
+
+ @end
+
+ @implementation SomeMoreData
+ @synthesize size = _size;
+ @synthesize buffer = _buffer;
+
+ - (void)appendData:(SomeMoreData*)other {
+        int const len = (_size + other.size); // implicit self->_size
+ char* d = malloc(sizeof(char) * len);
+ memcpy(d + 20, other.buffer, len);
+ }
+
+ @end
+ )";
+ std::string Diags;
+ EXPECT_TRUE(runCheckerOnCodeWithArgs<addCXXDeallocatorChecker>(
+ Code, {"-x", "objective-c", "-Wno-objc-root-class"}, Diags));
+ }
+}
+
} // namespace
} // namespace ento
} // namespace clang
diff --git a/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp b/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp
index 80289ef..aa32bb3 100644
--- a/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp
+++ b/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp
@@ -65,7 +65,7 @@ public:
if (!Compiler.hasDiagnostics())
return false;
- Compiler.createSourceManager(*FileMgr);
+ Compiler.createSourceManager();
Compiler.addDependencyCollector(std::make_shared<TestFileCollector>(
Compiler.getInvocation().getDependencyOutputOpts(), Deps));
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index f73b0aecc..74f29ac 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -133,28 +133,20 @@ static BasicType ParseBasicType(char c) {
switch (c) {
case 'c':
return BasicType::Int8;
- break;
case 's':
return BasicType::Int16;
- break;
case 'i':
return BasicType::Int32;
- break;
case 'l':
return BasicType::Int64;
- break;
case 'x':
return BasicType::Float16;
- break;
case 'f':
return BasicType::Float32;
- break;
case 'd':
return BasicType::Float64;
- break;
case 'y':
return BasicType::BFloat16;
- break;
default:
return BasicType::Unknown;
}
diff --git a/clang/utils/perf-training/CMakeLists.txt b/clang/utils/perf-training/CMakeLists.txt
index 1d7bb78..2cd4c4c 100644
--- a/clang/utils/perf-training/CMakeLists.txt
+++ b/clang/utils/perf-training/CMakeLists.txt
@@ -6,6 +6,10 @@ set(CLANG_PGO_TRAINING_DATA "${CMAKE_CURRENT_SOURCE_DIR}" CACHE PATH
set(CLANG_PGO_TRAINING_DATA_SOURCE_DIR OFF CACHE STRING "Path to source directory containing cmake project with source files to use for generating pgo data")
set(CLANG_PGO_TRAINING_DEPS "" CACHE STRING "Extra dependencies needed to build the PGO training data.")
+add_custom_target(clear-perf-data
+ COMMAND "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} perf.data
+ COMMENT "Clearing old perf data")
+
option(CLANG_PGO_TRAINING_USE_LLVM_BUILD "Use LLVM build for generating PGO data" ON)
llvm_canonicalize_cmake_booleans(
@@ -21,7 +25,7 @@ if(LLVM_BUILD_INSTRUMENTED)
add_lit_testsuite(generate-profraw "Generating clang PGO data"
${CMAKE_CURRENT_BINARY_DIR}/pgo-data/
EXCLUDE_FROM_CHECK_ALL
- DEPENDS clear-profraw
+ DEPENDS clear-profraw clang
)
add_custom_target(clear-profraw
@@ -55,6 +59,32 @@ if(LLVM_BUILD_INSTRUMENTED)
     USE_TOOLCHAIN EXCLUDE_FROM_ALL NO_INSTALL DEPENDS generate-profraw)
add_dependencies(generate-profdata generate-profraw-external)
endif()
+
+ if(NOT LLVM_PROFGEN)
+ find_program(LLVM_PROFGEN llvm-profgen)
+ endif()
+
+ if(NOT LLVM_PROFGEN)
+    message(STATUS "To enable converting CSSPGO samples, LLVM_PROFGEN has to point to llvm-profgen")
+ elseif(NOT CLANG_PGO_TRAINING_DATA_SOURCE_DIR)
+ message(STATUS "CLANG_PGO_TRAINING_DATA_SOURCE_DIR must be set to collect CSSPGO samples")
+ else()
+ set(PERF_HELPER "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py)
+ set(CLANG_SPROFDATA ${CMAKE_CURRENT_BINARY_DIR}/clang.sprofdata)
+ add_custom_command(
+ OUTPUT ${CLANG_SPROFDATA}
+ # Execute generate-profraw-external under perf
+ COMMAND ${PERF_HELPER} perf --csspgo -- ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target generate-profraw-external
+ # Convert perf profile into profraw
+ COMMAND ${PERF_HELPER} perf2prof ${LLVM_PROFGEN} $<TARGET_FILE:clang> ${CMAKE_CURRENT_BINARY_DIR}
+ # Merge profdata
+ COMMAND ${PERF_HELPER} merge --sample ${LLVM_PROFDATA} ${CLANG_SPROFDATA} ${CMAKE_CURRENT_BINARY_DIR}
+ DEPENDS clang ${CLANG_PGO_TRAINING_DEPS} clear-perf-data generate-profraw-external-clean
+ VERBATIM
+ USES_TERMINAL
+ )
+ add_custom_target(generate-sprofdata DEPENDS ${CLANG_SPROFDATA})
+ endif()
endif()
endif()
@@ -104,8 +134,4 @@ if(CLANG_BOLT AND NOT LLVM_BUILD_INSTRUMENTED)
COMMAND "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} fdata
COMMENT "Clearing old BOLT fdata")
- add_custom_target(clear-perf-data
- COMMAND "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} perf.data
- COMMENT "Clearing old perf data")
-
endif()
diff --git a/clang/utils/perf-training/perf-helper.py b/clang/utils/perf-training/perf-helper.py
index ab4491d..1c7904e 100644
--- a/clang/utils/perf-training/perf-helper.py
+++ b/clang/utils/perf-training/perf-helper.py
@@ -45,14 +45,22 @@ def clean(args):
def merge(args):
- if len(args) < 3:
- print(
- "Usage: %s merge <llvm-profdata> <output> <paths>\n" % __file__
- + "\tMerges all profraw files from path into output."
- )
- return 1
- cmd = [args[0], "merge", "-o", args[1]]
- for path in args[2:]:
+ parser = argparse.ArgumentParser(
+ prog="perf-helper merge",
+ description="Merges all profraw files from path(s) into output",
+ )
+ parser.add_argument("profdata", help="Path to llvm-profdata tool")
+ parser.add_argument("output", help="Output filename")
+ parser.add_argument(
+ "paths", nargs="+", help="Folder(s) containing input profraw files"
+ )
+ parser.add_argument("--sample", action="store_true", help="Sample profile")
+ opts = parser.parse_args(args)
+
+ cmd = [opts.profdata, "merge", "-o", opts.output]
+ if opts.sample:
+ cmd += ["--sample"]
+ for path in opts.paths:
cmd.extend(findFilesWithExtension(path, "profraw"))
subprocess.check_call(cmd)
return 0
@@ -73,25 +81,30 @@ def merge_fdata(args):
def perf(args):
parser = argparse.ArgumentParser(
- prog="perf-helper perf", description="perf wrapper for BOLT profile collection"
+ prog="perf-helper perf",
+ description="perf wrapper for BOLT/CSSPGO profile collection",
)
parser.add_argument(
"--lbr", action="store_true", help="Use perf with branch stacks"
)
+ parser.add_argument("--csspgo", action="store_true", help="Enable CSSPGO flags")
parser.add_argument("cmd", nargs=argparse.REMAINDER, help="")
opts = parser.parse_args(args)
cmd = opts.cmd[1:]
+ event = "br_inst_retired.near_taken:uppp" if opts.csspgo else "cycles:u"
perf_args = [
"perf",
"record",
- "--event=cycles:u",
+ f"--event={event}",
"--freq=max",
"--output=%d.perf.data" % os.getpid(),
]
- if opts.lbr:
+ if opts.lbr or opts.csspgo:
perf_args += ["--branch-filter=any,u"]
+ if opts.csspgo:
+ perf_args += ["-g", "--call-graph=fp"]
perf_args.extend(cmd)
start_time = time.time()
@@ -127,6 +140,30 @@ def perf2bolt(args):
return 0
+def perf2prof(args):
+ parser = argparse.ArgumentParser(
+ prog="perf-helper perf2prof",
+ description="perf to CSSPGO prof conversion wrapper",
+ )
+ parser.add_argument("profgen", help="Path to llvm-profgen binary")
+ parser.add_argument("binary", help="Input binary")
+ parser.add_argument("paths", nargs="+", help="Path containing perf.data files")
+ opts = parser.parse_args(args)
+
+ profgen_args = [opts.profgen, f"--binary={opts.binary}"]
+ for path in opts.paths:
+ for filename in findFilesWithExtension(path, "perf.data"):
+ subprocess.run(
+ [
+ *profgen_args,
+ f"--perfdata={filename}",
+ f"--output={filename}.profraw",
+ ],
+ check=True,
+ )
+ return 0
+
+
def dtrace(args):
parser = argparse.ArgumentParser(
prog="perf-helper dtrace",
@@ -660,7 +697,10 @@ def bolt_optimize(args):
process.check_returncode()
if opts.method in ["PERF", "LBR"]:
- perf2bolt([opts.bolt, opts.perf_training_binary_dir, opts.input])
+ args = [opts.bolt, opts.perf_training_binary_dir, opts.input]
+ if opts.method == "LBR":
+            args.append("--lbr")
+ perf2bolt(args)
merge_fdata([opts.merge_fdata, opts.fdata, opts.perf_training_binary_dir])
@@ -707,6 +747,7 @@ commands = {
"merge-fdata": merge_fdata,
"perf": perf,
"perf2bolt": perf2bolt,
+ "perf2prof": perf2prof,
}
diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt
index 0d7fc65..9095b05 100644
--- a/compiler-rt/lib/builtins/CMakeLists.txt
+++ b/compiler-rt/lib/builtins/CMakeLists.txt
@@ -816,14 +816,15 @@ set(s390x_SOURCES
${GENERIC_TF_SOURCES}
)
-set(wasm32_SOURCES
- ${GENERIC_TF_SOURCES}
- ${GENERIC_SOURCES}
-)
-set(wasm64_SOURCES
+
+set(wasm_SOURCES
+ wasm/__c_longjmp.S
+  wasm/__cpp_exception.S
${GENERIC_TF_SOURCES}
${GENERIC_SOURCES}
)
+set(wasm32_SOURCES ${wasm_SOURCES})
+set(wasm64_SOURCES ${wasm_SOURCES})
set(ve_SOURCES
ve/grow_stack.S
diff --git a/compiler-rt/lib/builtins/cpu_model/aarch64.c b/compiler-rt/lib/builtins/cpu_model/aarch64.c
index d788052..8af736d 100644
--- a/compiler-rt/lib/builtins/cpu_model/aarch64.c
+++ b/compiler-rt/lib/builtins/cpu_model/aarch64.c
@@ -69,15 +69,15 @@ struct {
#if defined(__APPLE__)
#include "aarch64/fmv/apple.inc"
#elif defined(__FreeBSD__) || defined(__OpenBSD__)
-#include "aarch64/fmv/mrs.inc"
+#include "aarch64/fmv/hwcap.inc"
#include "aarch64/fmv/elf_aux_info.inc"
#elif defined(__Fuchsia__)
#include "aarch64/fmv/fuchsia.inc"
#elif defined(__ANDROID__)
-#include "aarch64/fmv/mrs.inc"
+#include "aarch64/fmv/hwcap.inc"
#include "aarch64/fmv/android.inc"
#elif defined(__linux__) && __has_include(<sys/auxv.h>)
-#include "aarch64/fmv/mrs.inc"
+#include "aarch64/fmv/hwcap.inc"
#include "aarch64/fmv/getauxval.inc"
#elif defined(_WIN32)
#include "aarch64/fmv/windows.inc"
diff --git a/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc b/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/hwcap.inc
index afe9d4e..0f56cef 100644
--- a/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc
+++ b/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/hwcap.inc
@@ -7,9 +7,6 @@ static void __init_cpu_features_constructor(unsigned long hwcap,
const __ifunc_arg_t *arg) {
unsigned long long feat = 0;
#define setCPUFeature(F) feat |= 1ULL << F
-#define getCPUFeature(id, ftr) __asm__("mrs %0, " #id : "=r"(ftr))
-#define extractBits(val, start, number) \
- (val & ((1ULL << number) - 1ULL) << start) >> start
unsigned long hwcap2 = 0;
if (hwcap & _IFUNC_ARG_HWCAP)
hwcap2 = arg->_hwcap2;
diff --git a/compiler-rt/lib/builtins/wasm/__c_longjmp.S b/compiler-rt/lib/builtins/wasm/__c_longjmp.S
new file mode 100644
index 0000000..d130862
--- /dev/null
+++ b/compiler-rt/lib/builtins/wasm/__c_longjmp.S
@@ -0,0 +1,26 @@
+//===-- __c_longjmp.S - Implement __c_longjmp -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __c_longjmp, which LLVM uses to implement setjmp/longjmp
+// when Wasm EH is enabled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef __wasm_exception_handling__
+
+#ifdef __wasm64__
+#define PTR i64
+#else
+#define PTR i32
+#endif
+
+.globl __c_longjmp
+.tagtype __c_longjmp PTR
+__c_longjmp:
+
+#endif // __wasm_exception_handling__
diff --git a/compiler-rt/lib/builtins/wasm/__cpp_exception.S b/compiler-rt/lib/builtins/wasm/__cpp_exception.S
new file mode 100644
index 0000000..0496e1d
--- /dev/null
+++ b/compiler-rt/lib/builtins/wasm/__cpp_exception.S
@@ -0,0 +1,26 @@
+//===-- __cpp_exception.S - Implement __cpp_exception ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __cpp_exception which LLVM uses to implement exception
+// handling when Wasm EH is enabled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef __wasm_exception_handling__
+
+#ifdef __wasm64__
+#define PTR i64
+#else
+#define PTR i32
+#endif
+
+.globl __cpp_exception
+.tagtype __cpp_exception PTR
+__cpp_exception:
+
+#endif // __wasm_exception_handling__
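For context, tags like the one above are what Clang targets when lowering exceptions under `-fwasm-exceptions`; a rough source-level sketch (illustrative only) of code that ends up throwing and catching against the `__cpp_exception` tag:

// Compiled with e.g. clang++ --target=wasm32-wasi -fwasm-exceptions (sketch)
int parse(int v) {
  try {
    if (v < 0)
      throw v;      // lowers to a Wasm 'throw' against the __cpp_exception tag
    return v;
  } catch (int e) { // lowers to a Wasm 'catch' clause for the same tag
    return -e;
  }
}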
diff --git a/compiler-rt/test/asan/TestCases/wcscat.cpp b/compiler-rt/test/asan/TestCases/wcscat.cpp
index dcdff88..f0a8ec1 100644
--- a/compiler-rt/test/asan/TestCases/wcscat.cpp
+++ b/compiler-rt/test/asan/TestCases/wcscat.cpp
@@ -1,26 +1,26 @@
-// RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
#include <stdio.h>
#include <wchar.h>
int main() {
- wchar_t *start = L"X means ";
- wchar_t *append = L"dog";
+ const wchar_t *start = L"X means ";
+ const wchar_t *append = L"dog";
wchar_t goodDst[12];
wcscpy(goodDst, start);
wcscat(goodDst, append);
wchar_t badDst[9];
wcscpy(badDst, start);
- printf("Good so far.\n");
+ fprintf(stderr, "Good so far.\n");
// CHECK: Good so far.
- fflush(stdout);
+ fflush(stderr);
wcscat(badDst, append); // Boom!
// CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}}
- // CHECK: WRITE of size {{[0-9]+}} at [[ADDR:0x[0-9a-f]+]] thread T0
- // CHECK: #0 [[ADDR:0x[0-9a-f]+]] in wcscat{{.*}}sanitizer_common_interceptors.inc:{{[0-9]+}}
+ // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0
+ // CHECK: #0 {{0x[0-9a-f]+}} in wcscat
printf("Should have failed with ASAN error.\n");
}
\ No newline at end of file
diff --git a/compiler-rt/test/asan/TestCases/wcscpy.cpp b/compiler-rt/test/asan/TestCases/wcscpy.cpp
index 414d833..a280d29 100644
--- a/compiler-rt/test/asan/TestCases/wcscpy.cpp
+++ b/compiler-rt/test/asan/TestCases/wcscpy.cpp
@@ -1,23 +1,23 @@
-// RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
#include <stdio.h>
#include <wchar.h>
int main() {
- wchar_t *src = L"X means dog";
+ const wchar_t *src = L"X means dog";
wchar_t goodDst[12];
wcscpy(goodDst, src);
wchar_t badDst[7];
- printf("Good so far.\n");
+ fprintf(stderr, "Good so far.\n");
// CHECK: Good so far.
- fflush(stdout);
+ fflush(stderr);
wcscpy(badDst, src); // Boom!
- // CHECK:ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}}
- // CHECK: WRITE of size {{[0-9]+}} at [[ADDR:0x[0-9a-f]+]] thread T0
- // CHECK: #0 [[ADDR:0x[0-9a-f]+]] in wcscpy{{.*}}asan_interceptors.cpp:{{[0-9]+}}
+ // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}}
+ // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0
+ // CHECK: #0 {{0x[0-9a-f]+}} in wcscpy
printf("Should have failed with ASAN error.\n");
}
\ No newline at end of file
diff --git a/compiler-rt/test/asan/TestCases/wcsncat.cpp b/compiler-rt/test/asan/TestCases/wcsncat.cpp
index 3ab7fc8..eb7d095 100644
--- a/compiler-rt/test/asan/TestCases/wcsncat.cpp
+++ b/compiler-rt/test/asan/TestCases/wcsncat.cpp
@@ -1,14 +1,14 @@
-// RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
#include <stdio.h>
#include <wchar.h>
int main() {
- wchar_t *start = L"X means ";
- wchar_t *append = L"dog";
+ const wchar_t *start = L"X means ";
+ const wchar_t *append = L"dog";
wchar_t goodDst[15];
wcscpy(goodDst, start);
wcsncat(goodDst, append, 5);
@@ -16,12 +16,12 @@ int main() {
wchar_t badDst[11];
wcscpy(badDst, start);
wcsncat(badDst, append, 1);
- printf("Good so far.\n");
+ fprintf(stderr, "Good so far.\n");
// CHECK: Good so far.
- fflush(stdout);
+ fflush(stderr);
wcsncat(badDst, append, 3); // Boom!
// CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}}
- // CHECK: WRITE of size {{[0-9]+}} at [[ADDR:0x[0-9a-f]+]] thread T0
- // CHECK: #0 [[ADDR:0x[0-9a-f]+]] in wcsncat{{.*}}sanitizer_common_interceptors.inc:{{[0-9]+}}
+ // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0
+ // CHECK: #0 {{0x[0-9a-f]+}} in wcsncat
printf("Should have failed with ASAN error.\n");
}
\ No newline at end of file
diff --git a/compiler-rt/test/asan/TestCases/wcsncpy.cpp b/compiler-rt/test/asan/TestCases/wcsncpy.cpp
index 6177b72..1106bf5 100644
--- a/compiler-rt/test/asan/TestCases/wcsncpy.cpp
+++ b/compiler-rt/test/asan/TestCases/wcsncpy.cpp
@@ -1,25 +1,25 @@
-// RUN: %clangxx_asan -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O2 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
-// RUN: %clangxx_asan -O3 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
+// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
#include <stdio.h>
#include <wchar.h>
int main() {
- wchar_t *src = L"X means dog";
+ const wchar_t *src = L"X means dog";
wchar_t goodDst[12];
wcsncpy(goodDst, src, 12);
wchar_t badDst[7];
wcsncpy(badDst, src, 7); // This should still work.
- printf("Good so far.\n");
+ fprintf(stderr, "Good so far.\n");
// CHECK: Good so far.
- fflush(stdout);
+ fflush(stderr);
wcsncpy(badDst, src, 15); // Boom!
- // CHECK:ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}}
- // CHECK: WRITE of size {{[0-9]+}} at [[ADDR:0x[0-9a-f]+]] thread T0
- // CHECK: #0 [[ADDR:0x[0-9a-f]+]] in wcsncpy{{.*}}asan_interceptors.cpp:{{[0-9]+}}
+ // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}}
+ // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0
+ // CHECK: #0 {{0x[0-9a-f]+}} in wcsncpy
printf("Should have failed with ASAN error.\n");
}
\ No newline at end of file
diff --git a/flang-rt/lib/runtime/character.cpp b/flang-rt/lib/runtime/character.cpp
index 98a225d..0f9f419 100644
--- a/flang-rt/lib/runtime/character.cpp
+++ b/flang-rt/lib/runtime/character.cpp
@@ -789,7 +789,7 @@ void RTDEF(LenTrim)(Descriptor &result, const Descriptor &string, int kind,
std::size_t RTDEF(Scan1)(const char *x, std::size_t xLen, const char *set,
std::size_t setLen, bool back) {
- return ScanVerify<char, CharFunc::Scan>(x, xLen, set, setLen, back);
+ return ScanVerify<false>(x, xLen, set, setLen, back);
}
std::size_t RTDEF(Scan2)(const char16_t *x, std::size_t xLen,
const char16_t *set, std::size_t setLen, bool back) {
@@ -873,7 +873,7 @@ void RTDEF(Trim)(Descriptor &result, const Descriptor &string,
std::size_t RTDEF(Verify1)(const char *x, std::size_t xLen, const char *set,
std::size_t setLen, bool back) {
- return ScanVerify<char, CharFunc::Verify>(x, xLen, set, setLen, back);
+ return ScanVerify<true>(x, xLen, set, setLen, back);
}
std::size_t RTDEF(Verify2)(const char16_t *x, std::size_t xLen,
const char16_t *set, std::size_t setLen, bool back) {
diff --git a/flang-rt/lib/runtime/derived-api.cpp b/flang-rt/lib/runtime/derived-api.cpp
index bb08e03..fe68682 100644
--- a/flang-rt/lib/runtime/derived-api.cpp
+++ b/flang-rt/lib/runtime/derived-api.cpp
@@ -118,14 +118,26 @@ bool RTDEF(SameTypeAs)(const Descriptor &a, const Descriptor &b) {
}
bool RTDEF(ExtendsTypeOf)(const Descriptor &a, const Descriptor &mold) {
+  // The wording of the standard indicates that null or unallocated checks
+  // take precedence over the extension checks, which take precedence over
+  // any compiler-specific behavior.
+ // F'23 16.9.86 p 5
+ // If MOLD is unlimited polymorphic and is either a disassociated pointer or
+ // unallocated allocatable variable, the result is true;
auto aType{a.raw().type};
auto moldType{mold.raw().type};
if ((aType != CFI_type_struct && aType != CFI_type_other) ||
(moldType != CFI_type_struct && moldType != CFI_type_other)) {
- // If either type is intrinsic, they must match.
- return aType == moldType;
- } else if (const typeInfo::DerivedType *
- derivedTypeMold{GetDerivedType(mold)}) {
+ if (!mold.IsAllocated()) {
+ return true;
+ } else if (!a.IsAllocated()) {
+ return false;
+ } else {
+ // If either type is intrinsic and not a pointer or allocatable
+ // then they must match.
+ return aType == moldType;
+ }
+ } else if (const auto *derivedTypeMold{GetDerivedType(mold)}) {
// If A is unlimited polymorphic and is either a disassociated pointer or
// unallocated allocatable, the result is false.
// Otherwise if the dynamic type of A or MOLD is extensible, the result is
diff --git a/flang/docs/ComplexOperations.md b/flang/docs/ComplexOperations.md
index 3ebeea5..1b6ec52 100644
--- a/flang/docs/ComplexOperations.md
+++ b/flang/docs/ComplexOperations.md
@@ -93,7 +93,9 @@ While [the same option in clang][2] allows specifying `promoted`, this is not
implemented in Flang. Also, in the case of `improved`, clang does not handle NaN
and infinite values, but Flang does. These behavioral differences arise because
the transformation of complex division calculations depends on the implementation
-of ComplexToStandard, which may change in the future.
+of ComplexToStandard, which may change in the future. If you specify
+`-ffast-math`, the lowering is the same as specifying
+`-fcomplex-arithmetic=basic`.
[1]: https://discourse.llvm.org/t/rfc-change-lowering-of-fortran-math-intrinsics/63971
[2]: https://clang.llvm.org/docs/UsersManual.html#cmdoption-fcomplex-arithmetic
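For reference, the `basic` variant mentioned above is the textbook expansion, with no recovery for NaN/infinite values or intermediate overflow. A rough C++ sketch of the division it corresponds to (not the exact ComplexToStandard output):

#include <complex>

// (a+bi)/(c+di) = ((a*c + b*d) + (b*c - a*d)i) / (c*c + d*d)
std::complex<double> div_basic(std::complex<double> x, std::complex<double> y) {
  double a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
  double denom = c * c + d * d; // can overflow/underflow: no range reduction
  return {(a * c + b * d) / denom, (b * c - a * d) / denom};
}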
diff --git a/flang/docs/FlangDriver.md b/flang/docs/FlangDriver.md
index 2b7d9d4..3286171 100644
--- a/flang/docs/FlangDriver.md
+++ b/flang/docs/FlangDriver.md
@@ -573,6 +573,9 @@ documentation for more details.
These correspond to LLVM IR Fast Math attributes:
https://llvm.org/docs/LangRef.html#fast-math-flags
+In addition to the above, `-ffast-math` also enables
+`-fcomplex-arithmetic=basic`.
+
When `-ffast-math` is specified, any linker steps generated by the compiler
driver will also link to `crtfastmath.o`, which adds a static constructor
that sets the FTZ/DAZ bits in MXCSR, affecting not only the current
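For context, the static constructor in `crtfastmath.o` is roughly equivalent to the following on x86; this is a sketch using SSE intrinsics, while the real object is shipped by the toolchain:

#include <xmmintrin.h> // _MM_SET_FLUSH_ZERO_MODE
#include <pmmintrin.h> // _MM_SET_DENORMALS_ZERO_MODE

// Runs before main(), mirroring the effect of linking crtfastmath.o (sketch).
__attribute__((constructor)) static void set_ftz_daz(void) {
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);         // flush-to-zero
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); // denormals-are-zero
}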
diff --git a/flang/include/flang/Lower/OpenACC.h b/flang/include/flang/Lower/OpenACC.h
index 19d7594..4622dbc 100644
--- a/flang/include/flang/Lower/OpenACC.h
+++ b/flang/include/flang/Lower/OpenACC.h
@@ -77,7 +77,8 @@ static constexpr llvm::StringRef privatizationRecipePrefix = "privatization";
mlir::Value genOpenACCConstruct(AbstractConverter &,
Fortran::semantics::SemanticsContext &,
pft::Evaluation &,
- const parser::OpenACCConstruct &);
+ const parser::OpenACCConstruct &,
+ Fortran::lower::SymMap &localSymbols);
void genOpenACCDeclarativeConstruct(
AbstractConverter &, Fortran::semantics::SemanticsContext &,
StatementContext &, const parser::OpenACCDeclarativeConstruct &);
diff --git a/flang/include/flang/Lower/SymbolMap.h b/flang/include/flang/Lower/SymbolMap.h
index 813df77..e57b6a4 100644
--- a/flang/include/flang/Lower/SymbolMap.h
+++ b/flang/include/flang/Lower/SymbolMap.h
@@ -260,6 +260,10 @@ public:
return lookupSymbol(*sym);
}
+ /// Find a symbol by name and return its value if it appears in the current
+ /// mappings. This lookup is more expensive as it iterates over the map.
+ const semantics::Symbol *lookupSymbolByName(llvm::StringRef symName);
+
/// Find `symbol` and return its value if it appears in the inner-most level
/// map.
SymbolBox shallowLookupSymbol(semantics::SymbolRef sym);
diff --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
index 4b3087e..d3af3ba 100644
--- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h
+++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
@@ -959,6 +959,15 @@ mlir::Value genLifetimeStart(mlir::OpBuilder &builder, mlir::Location loc,
void genLifetimeEnd(mlir::OpBuilder &builder, mlir::Location loc,
mlir::Value mem);
+/// Given a fir.box or fir.class \p box describing an entity and a raw address
+/// \p newAddr for an entity with the same Fortran properties (rank, dynamic
+/// type, length parameters and bounds) and attributes (POINTER or ALLOCATABLE),
+/// create a box for \p newAddr with the same type as \p box. This assumes \p
+/// newAddr is for contiguous storage (\p box does not have to be contiguous).
+mlir::Value getDescriptorWithNewBaseAddress(fir::FirOpBuilder &builder,
+ mlir::Location loc, mlir::Value box,
+ mlir::Value newAddr);
+
} // namespace fir::factory
#endif // FORTRAN_OPTIMIZER_BUILDER_FIRBUILDER_H
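A hypothetical call site for the new helper (only `getDescriptorWithNewBaseAddress` comes from this patch; the surrounding names are invented for illustration):

// Rebind a possibly discontiguous fir.box to a contiguous temporary (sketch).
mlir::Value reboxToTemp(fir::FirOpBuilder &builder, mlir::Location loc,
                        mlir::Value origBox, mlir::Value contiguousTmp) {
  // The result has the same box type as origBox but addresses contiguousTmp.
  return fir::factory::getDescriptorWithNewBaseAddress(builder, loc, origBox,
                                                       contiguousTmp);
}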
diff --git a/flang/include/flang/Semantics/openmp-utils.h b/flang/include/flang/Semantics/openmp-utils.h
index 08b6716..2954a1c 100644
--- a/flang/include/flang/Semantics/openmp-utils.h
+++ b/flang/include/flang/Semantics/openmp-utils.h
@@ -37,6 +37,8 @@ template <typename T, typename U = std::remove_const_t<T>> U AsRvalue(T &t) {
template <typename T> T &&AsRvalue(T &&t) { return std::move(t); }
+const Scope &GetScopingUnit(const Scope &scope);
+
// There is no consistent way to get the source of an ActionStmt, but there
// is "source" in Statement<T>. This structure keeps the ActionStmt with the
// extracted source for further use.
diff --git a/flang/include/flang/Semantics/symbol.h b/flang/include/flang/Semantics/symbol.h
index e90e9c6..a0d5ae7 100644
--- a/flang/include/flang/Semantics/symbol.h
+++ b/flang/include/flang/Semantics/symbol.h
@@ -801,7 +801,7 @@ public:
AccPrivate, AccFirstPrivate, AccShared,
// OpenACC data-mapping attribute
AccCopy, AccCopyIn, AccCopyInReadOnly, AccCopyOut, AccCreate, AccDelete,
- AccPresent, AccLink, AccDeviceResident, AccDevicePtr,
+ AccPresent, AccLink, AccDeviceResident, AccDevicePtr, AccUseDevice,
// OpenACC declare
AccDeclare,
// OpenACC data-movement attribute
diff --git a/flang/include/flang/Support/LangOptions.def b/flang/include/flang/Support/LangOptions.def
index ba72d7b..e7185c8 100644
--- a/flang/include/flang/Support/LangOptions.def
+++ b/flang/include/flang/Support/LangOptions.def
@@ -60,7 +60,8 @@ LANGOPT(OpenMPNoThreadState, 1, 0)
LANGOPT(OpenMPNoNestedParallelism, 1, 0)
/// Use SIMD only OpenMP support.
LANGOPT(OpenMPSimd, 1, false)
-
+/// Disable fast MOD operations for REAL
+LANGOPT(NoFastRealMod, 1, false)
LANGOPT(VScaleMin, 32, 0) ///< Minimum vscale range value
LANGOPT(VScaleMax, 32, 0) ///< Maximum vscale range value
diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp
index 81610ed..548ca67 100644
--- a/flang/lib/Frontend/CompilerInvocation.cpp
+++ b/flang/lib/Frontend/CompilerInvocation.cpp
@@ -1425,6 +1425,9 @@ static bool parseFloatingPointArgs(CompilerInvocation &invoc,
opts.setFPContractMode(Fortran::common::LangOptions::FPM_Fast);
}
+ if (args.hasArg(clang::driver::options::OPT_fno_fast_real_mod))
+ opts.NoFastRealMod = true;
+
return true;
}
diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp
index d5e0325..0c630d2 100644
--- a/flang/lib/Frontend/FrontendActions.cpp
+++ b/flang/lib/Frontend/FrontendActions.cpp
@@ -277,6 +277,14 @@ bool CodeGenAction::beginSourceFileAction() {
ci.getInvocation().getLangOpts().OpenMPVersion);
}
+ if (ci.getInvocation().getLangOpts().NoFastRealMod) {
+ mlir::ModuleOp mod = lb.getModule();
+ mod.getOperation()->setAttr(
+ mlir::StringAttr::get(mod.getContext(),
+ llvm::Twine{"fir.no_fast_real_mod"}),
+ mlir::BoolAttr::get(mod.getContext(), true));
+ }
+
// Create a parse tree and lower it to FIR
parseAndLowerTree(ci, lb);
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index 149e51b..780d56f 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -3182,7 +3182,7 @@ private:
mlir::OpBuilder::InsertPoint insertPt = builder->saveInsertionPoint();
localSymbols.pushScope();
mlir::Value exitCond = genOpenACCConstruct(
- *this, bridge.getSemanticsContext(), getEval(), acc);
+ *this, bridge.getSemanticsContext(), getEval(), acc, localSymbols);
const Fortran::parser::OpenACCLoopConstruct *accLoop =
std::get_if<Fortran::parser::OpenACCLoopConstruct>(&acc.u);
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 95d0ada..f9b9b850 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -3184,7 +3184,8 @@ genACCHostDataOp(Fortran::lower::AbstractConverter &converter,
Fortran::lower::pft::Evaluation &eval,
Fortran::semantics::SemanticsContext &semanticsContext,
Fortran::lower::StatementContext &stmtCtx,
- const Fortran::parser::AccClauseList &accClauseList) {
+ const Fortran::parser::AccClauseList &accClauseList,
+ Fortran::lower::SymMap &localSymbols) {
mlir::Value ifCond;
llvm::SmallVector<mlir::Value> dataOperands;
bool addIfPresentAttr = false;
@@ -3199,6 +3200,19 @@ genACCHostDataOp(Fortran::lower::AbstractConverter &converter,
} else if (const auto *useDevice =
std::get_if<Fortran::parser::AccClause::UseDevice>(
&clause.u)) {
+      // When CUDA Fortran is enabled, extra symbols are used in the host_data
+ // region. Look for them and bind their values with the symbols in the
+ // outer scope.
+ if (semanticsContext.IsEnabled(Fortran::common::LanguageFeature::CUDA)) {
+ const Fortran::parser::AccObjectList &objectList{useDevice->v};
+ for (const auto &accObject : objectList.v) {
+ Fortran::semantics::Symbol &symbol =
+ getSymbolFromAccObject(accObject);
+ const Fortran::semantics::Symbol *baseSym =
+ localSymbols.lookupSymbolByName(symbol.name().ToString());
+ localSymbols.copySymbolBinding(*baseSym, symbol);
+ }
+ }
genDataOperandOperations<mlir::acc::UseDeviceOp>(
useDevice->v, converter, semanticsContext, stmtCtx, dataOperands,
mlir::acc::DataClause::acc_use_device,
@@ -3239,11 +3253,11 @@ genACCHostDataOp(Fortran::lower::AbstractConverter &converter,
hostDataOp.setIfPresentAttr(builder.getUnitAttr());
}
-static void
-genACC(Fortran::lower::AbstractConverter &converter,
- Fortran::semantics::SemanticsContext &semanticsContext,
- Fortran::lower::pft::Evaluation &eval,
- const Fortran::parser::OpenACCBlockConstruct &blockConstruct) {
+static void genACC(Fortran::lower::AbstractConverter &converter,
+ Fortran::semantics::SemanticsContext &semanticsContext,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenACCBlockConstruct &blockConstruct,
+ Fortran::lower::SymMap &localSymbols) {
const auto &beginBlockDirective =
std::get<Fortran::parser::AccBeginBlockDirective>(blockConstruct.t);
const auto &blockDirective =
@@ -3273,7 +3287,7 @@ genACC(Fortran::lower::AbstractConverter &converter,
accClauseList);
} else if (blockDirective.v == llvm::acc::ACCD_host_data) {
genACCHostDataOp(converter, currentLocation, eval, semanticsContext,
- stmtCtx, accClauseList);
+ stmtCtx, accClauseList, localSymbols);
}
}
@@ -4647,13 +4661,15 @@ mlir::Value Fortran::lower::genOpenACCConstruct(
Fortran::lower::AbstractConverter &converter,
Fortran::semantics::SemanticsContext &semanticsContext,
Fortran::lower::pft::Evaluation &eval,
- const Fortran::parser::OpenACCConstruct &accConstruct) {
+ const Fortran::parser::OpenACCConstruct &accConstruct,
+ Fortran::lower::SymMap &localSymbols) {
mlir::Value exitCond;
Fortran::common::visit(
common::visitors{
[&](const Fortran::parser::OpenACCBlockConstruct &blockConstruct) {
- genACC(converter, semanticsContext, eval, blockConstruct);
+ genACC(converter, semanticsContext, eval, blockConstruct,
+ localSymbols);
},
[&](const Fortran::parser::OpenACCCombinedConstruct
&combinedConstruct) {
diff --git a/flang/lib/Lower/SymbolMap.cpp b/flang/lib/Lower/SymbolMap.cpp
index 080f21e..78529e0 100644
--- a/flang/lib/Lower/SymbolMap.cpp
+++ b/flang/lib/Lower/SymbolMap.cpp
@@ -45,6 +45,16 @@ Fortran::lower::SymMap::lookupSymbol(Fortran::semantics::SymbolRef symRef) {
return SymbolBox::None{};
}
+const Fortran::semantics::Symbol *
+Fortran::lower::SymMap::lookupSymbolByName(llvm::StringRef symName) {
+ for (auto jmap = symbolMapStack.rbegin(), jend = symbolMapStack.rend();
+ jmap != jend; ++jmap)
+ for (auto const &[sym, symBox] : *jmap)
+ if (sym->name().ToString() == symName)
+ return sym;
+ return nullptr;
+}
+
Fortran::lower::SymbolBox Fortran::lower::SymMap::shallowLookupSymbol(
Fortran::semantics::SymbolRef symRef) {
auto *sym = symRef->HasLocalLocality() ? &*symRef : &symRef->GetUltimate();
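
Note that lookupSymbolByName scans the scope stack innermost-first and returns nullptr when no binding matches, so callers such as the genACCHostDataOp hunk above should tolerate a null result; a minimal guarded-usage sketch (caller names assumed from that hunk):

    // Bind the host_data symbol only when an outer-scope binding exists.
    if (const Fortran::semantics::Symbol *baseSym =
            localSymbols.lookupSymbolByName(symbol.name().ToString()))
      localSymbols.copySymbolBinding(*baseSym, symbol);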
diff --git a/flang/lib/Optimizer/Builder/FIRBuilder.cpp b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
index b6501fd..5da27d1 100644
--- a/flang/lib/Optimizer/Builder/FIRBuilder.cpp
+++ b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
@@ -1943,7 +1943,7 @@ void fir::factory::genDimInfoFromBox(
return;
unsigned rank = fir::getBoxRank(boxType);
- assert(rank != 0 && "must be an array of known rank");
+ assert(!boxType.isAssumedRank() && "must be an array of known rank");
mlir::Type idxTy = builder.getIndexType();
for (unsigned i = 0; i < rank; ++i) {
mlir::Value dim = builder.createIntegerConstant(loc, idxTy, i);
@@ -1974,3 +1974,25 @@ void fir::factory::genLifetimeEnd(mlir::OpBuilder &builder, mlir::Location loc,
mlir::Value cast) {
mlir::LLVM::LifetimeEndOp::create(builder, loc, cast);
}
+
+mlir::Value fir::factory::getDescriptorWithNewBaseAddress(
+ fir::FirOpBuilder &builder, mlir::Location loc, mlir::Value box,
+ mlir::Value newAddr) {
+ auto boxType = llvm::dyn_cast<fir::BaseBoxType>(box.getType());
+ assert(boxType &&
+ "expected a box type input in getDescriptorWithNewBaseAddress");
+ if (boxType.isAssumedRank())
+ TODO(loc, "changing descriptor base address for an assumed rank entity");
+ llvm::SmallVector<mlir::Value> lbounds;
+ fir::factory::genDimInfoFromBox(builder, loc, box, &lbounds,
+ /*extents=*/nullptr, /*strides=*/nullptr);
+ fir::BoxValue inputBoxValue(box, lbounds, /*explicitParams=*/{});
+ fir::ExtendedValue openedInput =
+ fir::factory::readBoxValue(builder, loc, inputBoxValue);
+ mlir::Value shape = fir::isArray(openedInput)
+ ? builder.createShape(loc, openedInput)
+ : mlir::Value{};
+ mlir::Value typeMold = fir::isPolymorphicType(boxType) ? box : mlir::Value{};
+ return builder.createBox(loc, boxType, newAddr, shape, /*slice=*/{},
+ fir::getTypeParams(openedInput), typeMold);
+}
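
A minimal usage sketch for the new helper; oldBox and newAddr are hypothetical values whose types match the box and base-address operands:

    // Rebuild a descriptor around newAddr while preserving the input box's
    // lower bounds, extents, type parameters, and (if polymorphic) dynamic type.
    mlir::Value newBox = fir::factory::getDescriptorWithNewBaseAddress(
        builder, loc, /*box=*/oldBox, /*newAddr=*/newAddr);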
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index 71d35e3..de7694f 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -6989,8 +6989,33 @@ mlir::Value IntrinsicLibrary::genMergeBits(mlir::Type resultType,
}
// MOD
+static mlir::Value genFastMod(fir::FirOpBuilder &builder, mlir::Location loc,
+ mlir::Value a, mlir::Value p) {
+ auto fastmathFlags = mlir::arith::FastMathFlags::contract;
+ auto fastmathAttr =
+ mlir::arith::FastMathFlagsAttr::get(builder.getContext(), fastmathFlags);
+ mlir::Value divResult =
+ mlir::arith::DivFOp::create(builder, loc, a, p, fastmathAttr);
+ mlir::Type intType = builder.getIntegerType(
+ a.getType().getIntOrFloatBitWidth(), /*signed=*/true);
+ mlir::Value intResult = builder.createConvert(loc, intType, divResult);
+ mlir::Value cnvResult = builder.createConvert(loc, a.getType(), intResult);
+ mlir::Value mulResult =
+ mlir::arith::MulFOp::create(builder, loc, cnvResult, p, fastmathAttr);
+ mlir::Value subResult =
+ mlir::arith::SubFOp::create(builder, loc, a, mulResult, fastmathAttr);
+ return subResult;
+}
+
mlir::Value IntrinsicLibrary::genMod(mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args) {
+ auto mod = builder.getModule();
+ bool dontUseFastRealMod = false;
+ bool canUseApprox = mlir::arith::bitEnumContainsAny(
+ builder.getFastMathFlags(), mlir::arith::FastMathFlags::afn);
+ if (auto attr = mod->getAttrOfType<mlir::BoolAttr>("fir.no_fast_real_mod"))
+ dontUseFastRealMod = attr.getValue();
+
assert(args.size() == 2);
if (resultType.isUnsignedInteger()) {
mlir::Type signlessType = mlir::IntegerType::get(
@@ -7002,9 +7027,16 @@ mlir::Value IntrinsicLibrary::genMod(mlir::Type resultType,
if (mlir::isa<mlir::IntegerType>(resultType))
return mlir::arith::RemSIOp::create(builder, loc, args[0], args[1]);
- // Use runtime.
- return builder.createConvert(
- loc, resultType, fir::runtime::genMod(builder, loc, args[0], args[1]));
+ if (resultType.isFloat() && canUseApprox && !dontUseFastRealMod) {
+ // Treat MOD as an approximate function and code-gen inline code
+ // instead of calling into the Fortran runtime library.
+ return builder.createConvert(loc, resultType,
+ genFastMod(builder, loc, args[0], args[1]));
+ } else {
+ // Use runtime.
+ return builder.createConvert(
+ loc, resultType, fir::runtime::genMod(builder, loc, args[0], args[1]));
+ }
}
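
For reference, the inline path implements the Fortran definition MOD(a, p) = a - INT(a/p)*p; a C-like sketch of the equivalence for a 64-bit real (assumed, for orientation only):

    double fast_mod(double a, double p) {
      double q = a / p;                  // arith.divf
      double t = (double)(long long)q;   // fir.convert pair: truncate toward zero
      return a - t * p;                  // arith.mulf + arith.subf
    }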
// MODULO
diff --git a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
index 57be863..e595e61 100644
--- a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
+++ b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp
@@ -41,7 +41,9 @@
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstddef>
#include <iterator>
@@ -75,6 +77,112 @@ class MapInfoFinalizationPass
/// | |
std::map<mlir::Operation *, mlir::Value> localBoxAllocas;
+ /// Return true if the given path exists in a list of paths.
+ static bool
+ containsPath(const llvm::SmallVectorImpl<llvm::SmallVector<int64_t>> &paths,
+ llvm::ArrayRef<int64_t> path) {
+ return llvm::any_of(paths, [&](const llvm::SmallVector<int64_t> &p) {
+ return p.size() == path.size() &&
+ std::equal(p.begin(), p.end(), path.begin());
+ });
+ }
+
+ /// Return true if the given path is already present in
+ /// op.getMembersIndexAttr().
+ static bool mappedIndexPathExists(mlir::omp::MapInfoOp op,
+ llvm::ArrayRef<int64_t> indexPath) {
+ if (mlir::ArrayAttr attr = op.getMembersIndexAttr()) {
+ for (mlir::Attribute list : attr) {
+ auto listAttr = mlir::cast<mlir::ArrayAttr>(list);
+ if (listAttr.size() != indexPath.size())
+ continue;
+ bool allEq = true;
+ for (auto [i, val] : llvm::enumerate(listAttr)) {
+ if (mlir::cast<mlir::IntegerAttr>(val).getInt() != indexPath[i]) {
+ allEq = false;
+ break;
+ }
+ }
+ if (allEq)
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /// Build a compact string key for an index path for set-based
+ /// deduplication. Format: "N:v0,v1,..." where N is the length.
+ static void buildPathKey(llvm::ArrayRef<int64_t> path,
+ llvm::SmallString<64> &outKey) {
+ outKey.clear();
+ llvm::raw_svector_ostream os(outKey);
+ os << path.size() << ':';
+ for (size_t i = 0; i < path.size(); ++i) {
+ if (i)
+ os << ',';
+ os << path[i];
+ }
+ }
+
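
A concrete example of the key format documented above:

    // buildPathKey({2, 0, 1}, key)  =>  key == "3:2,0,1"
    // buildPathKey({7}, key)        =>  key == "1:7"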
+ /// Create the member map for coordRef and append it (and its index
+ /// path) to the provided new* vectors, if it is not already present.
+ void appendMemberMapIfNew(
+ mlir::omp::MapInfoOp op, fir::FirOpBuilder &builder, mlir::Location loc,
+ mlir::Value coordRef, llvm::ArrayRef<int64_t> indexPath,
+ llvm::StringRef memberName,
+ llvm::SmallVectorImpl<mlir::Value> &newMapOpsForFields,
+ llvm::SmallVectorImpl<llvm::SmallVector<int64_t>> &newMemberIndexPaths) {
+ // Local de-dup within this op invocation.
+ if (containsPath(newMemberIndexPaths, indexPath))
+ return;
+ // Global de-dup against already present member indices.
+ if (mappedIndexPathExists(op, indexPath))
+ return;
+
+ if (op.getMapperId()) {
+ mlir::omp::DeclareMapperOp symbol =
+ mlir::SymbolTable::lookupNearestSymbolFrom<
+ mlir::omp::DeclareMapperOp>(op, op.getMapperIdAttr());
+ assert(symbol && "missing symbol for declare mapper identifier");
+ mlir::omp::DeclareMapperInfoOp mapperInfo = symbol.getDeclareMapperInfo();
+      // TODO: There is probably a way to cache these keys so we don't
+      // rebuild them on every check and save some cycles, but that can wait
+      // for a subsequent patch.
+ for (auto v : mapperInfo.getMapVars()) {
+ mlir::omp::MapInfoOp map =
+ mlir::cast<mlir::omp::MapInfoOp>(v.getDefiningOp());
+ if (!map.getMembers().empty() && mappedIndexPathExists(map, indexPath))
+ return;
+ }
+ }
+
+ builder.setInsertionPoint(op);
+ fir::factory::AddrAndBoundsInfo info = fir::factory::getDataOperandBaseAddr(
+ builder, coordRef, /*isOptional=*/false, loc);
+ llvm::SmallVector<mlir::Value> bounds = fir::factory::genImplicitBoundsOps<
+ mlir::omp::MapBoundsOp, mlir::omp::MapBoundsType>(
+ builder, info,
+ hlfir::translateToExtendedValue(loc, builder, hlfir::Entity{coordRef})
+ .first,
+ /*dataExvIsAssumedSize=*/false, loc);
+
+ mlir::omp::MapInfoOp fieldMapOp = mlir::omp::MapInfoOp::create(
+ builder, loc, coordRef.getType(), coordRef,
+ mlir::TypeAttr::get(fir::unwrapRefType(coordRef.getType())),
+ op.getMapTypeAttr(),
+ builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
+ mlir::omp::VariableCaptureKind::ByRef),
+ /*varPtrPtr=*/mlir::Value{}, /*members=*/mlir::ValueRange{},
+ /*members_index=*/mlir::ArrayAttr{}, bounds,
+ /*mapperId=*/mlir::FlatSymbolRefAttr(),
+ builder.getStringAttr(op.getNameAttr().strref() + "." + memberName +
+ ".implicit_map"),
+ /*partial_map=*/builder.getBoolAttr(false));
+
+ newMapOpsForFields.emplace_back(fieldMapOp);
+ newMemberIndexPaths.emplace_back(indexPath.begin(), indexPath.end());
+ }
+
/// getMemberUserList gathers all users of a particular MapInfoOp that are
/// other MapInfoOp's and places them into the mapMemberUsers list, which
/// records the map that the current argument MapInfoOp "op" is part of
@@ -363,7 +471,7 @@ class MapInfoFinalizationPass
mlir::ArrayAttr newMembersAttr;
mlir::SmallVector<mlir::Value> newMembers;
llvm::SmallVector<llvm::SmallVector<int64_t>> memberIndices;
- bool IsHasDeviceAddr = isHasDeviceAddr(op, target);
+ bool isHasDeviceAddrFlag = isHasDeviceAddr(op, target);
if (!mapMemberUsers.empty() || !op.getMembers().empty())
getMemberIndicesAsVectors(
@@ -406,7 +514,7 @@ class MapInfoFinalizationPass
mapUser.parent.getMembersMutable().assign(newMemberOps);
mapUser.parent.setMembersIndexAttr(
builder.create2DI64ArrayAttr(memberIndices));
- } else if (!IsHasDeviceAddr) {
+ } else if (!isHasDeviceAddrFlag) {
auto baseAddr =
genBaseAddrMap(descriptor, op.getBounds(), op.getMapType(), builder);
newMembers.push_back(baseAddr);
@@ -429,7 +537,7 @@ class MapInfoFinalizationPass
// The contents of the descriptor (the base address in particular) will
// remain unchanged though.
uint64_t mapType = op.getMapType();
- if (IsHasDeviceAddr) {
+ if (isHasDeviceAddrFlag) {
mapType |= llvm::to_underlying(
llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS);
}
@@ -701,94 +809,134 @@ class MapInfoFinalizationPass
auto recordType = mlir::cast<fir::RecordType>(underlyingType);
llvm::SmallVector<mlir::Value> newMapOpsForFields;
- llvm::SmallVector<int64_t> fieldIndicies;
+ llvm::SmallVector<llvm::SmallVector<int64_t>> newMemberIndexPaths;
+ // 1) Handle direct top-level allocatable fields.
for (auto fieldMemTyPair : recordType.getTypeList()) {
auto &field = fieldMemTyPair.first;
auto memTy = fieldMemTyPair.second;
- bool shouldMapField =
- llvm::find_if(mapVarForwardSlice, [&](mlir::Operation *sliceOp) {
- if (!fir::isAllocatableType(memTy))
- return false;
-
- auto designateOp = mlir::dyn_cast<hlfir::DesignateOp>(sliceOp);
- if (!designateOp)
- return false;
-
- return designateOp.getComponent() &&
- designateOp.getComponent()->strref() == field;
- }) != mapVarForwardSlice.end();
-
- // TODO Handle recursive record types. Adapting
- // `createParentSymAndGenIntermediateMaps` to work direclty on MLIR
- // entities might be helpful here.
-
- if (!shouldMapField)
+ if (!fir::isAllocatableType(memTy))
continue;
- int32_t fieldIdx = recordType.getFieldIndex(field);
- bool alreadyMapped = [&]() {
- if (op.getMembersIndexAttr())
- for (auto indexList : op.getMembersIndexAttr()) {
- auto indexListAttr = mlir::cast<mlir::ArrayAttr>(indexList);
- if (indexListAttr.size() == 1 &&
- mlir::cast<mlir::IntegerAttr>(indexListAttr[0]).getInt() ==
- fieldIdx)
- return true;
- }
-
- return false;
- }();
-
- if (alreadyMapped)
+ bool referenced = llvm::any_of(mapVarForwardSlice, [&](auto *opv) {
+ auto designateOp = mlir::dyn_cast<hlfir::DesignateOp>(opv);
+ return designateOp && designateOp.getComponent() &&
+ designateOp.getComponent()->strref() == field;
+ });
+ if (!referenced)
continue;
+ int32_t fieldIdx = recordType.getFieldIndex(field);
builder.setInsertionPoint(op);
fir::IntOrValue idxConst =
mlir::IntegerAttr::get(builder.getI32Type(), fieldIdx);
auto fieldCoord = fir::CoordinateOp::create(
builder, op.getLoc(), builder.getRefType(memTy), op.getVarPtr(),
llvm::SmallVector<fir::IntOrValue, 1>{idxConst});
- fir::factory::AddrAndBoundsInfo info =
- fir::factory::getDataOperandBaseAddr(
- builder, fieldCoord, /*isOptional=*/false, op.getLoc());
- llvm::SmallVector<mlir::Value> bounds =
- fir::factory::genImplicitBoundsOps<mlir::omp::MapBoundsOp,
- mlir::omp::MapBoundsType>(
- builder, info,
- hlfir::translateToExtendedValue(op.getLoc(), builder,
- hlfir::Entity{fieldCoord})
- .first,
- /*dataExvIsAssumedSize=*/false, op.getLoc());
-
- mlir::omp::MapInfoOp fieldMapOp = mlir::omp::MapInfoOp::create(
- builder, op.getLoc(), fieldCoord.getResult().getType(),
- fieldCoord.getResult(),
- mlir::TypeAttr::get(
- fir::unwrapRefType(fieldCoord.getResult().getType())),
- op.getMapTypeAttr(),
- builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
- mlir::omp::VariableCaptureKind::ByRef),
- /*varPtrPtr=*/mlir::Value{}, /*members=*/mlir::ValueRange{},
- /*members_index=*/mlir::ArrayAttr{}, bounds,
- /*mapperId=*/mlir::FlatSymbolRefAttr(),
- builder.getStringAttr(op.getNameAttr().strref() + "." + field +
- ".implicit_map"),
- /*partial_map=*/builder.getBoolAttr(false));
- newMapOpsForFields.emplace_back(fieldMapOp);
- fieldIndicies.emplace_back(fieldIdx);
+ int64_t fieldIdx64 = static_cast<int64_t>(fieldIdx);
+ llvm::SmallVector<int64_t, 1> idxPath{fieldIdx64};
+ appendMemberMapIfNew(op, builder, op.getLoc(), fieldCoord, idxPath,
+ field, newMapOpsForFields, newMemberIndexPaths);
+ }
+
+ // Handle nested allocatable fields along any component chain
+ // referenced in the region via HLFIR designates.
+ llvm::SmallVector<llvm::SmallVector<int64_t>> seenIndexPaths;
+ for (mlir::Operation *sliceOp : mapVarForwardSlice) {
+ auto designateOp = mlir::dyn_cast<hlfir::DesignateOp>(sliceOp);
+ if (!designateOp || !designateOp.getComponent())
+ continue;
+ llvm::SmallVector<llvm::StringRef> compPathReversed;
+ compPathReversed.push_back(designateOp.getComponent()->strref());
+ mlir::Value curBase = designateOp.getMemref();
+ bool rootedAtMapArg = false;
+ while (true) {
+ if (auto parentDes = curBase.getDefiningOp<hlfir::DesignateOp>()) {
+ if (!parentDes.getComponent())
+ break;
+ compPathReversed.push_back(parentDes.getComponent()->strref());
+ curBase = parentDes.getMemref();
+ continue;
+ }
+ if (auto decl = curBase.getDefiningOp<hlfir::DeclareOp>()) {
+ if (auto barg =
+ mlir::dyn_cast<mlir::BlockArgument>(decl.getMemref()))
+ rootedAtMapArg = (barg == opBlockArg);
+ } else if (auto blockArg =
+ mlir::dyn_cast_or_null<mlir::BlockArgument>(
+ curBase)) {
+ rootedAtMapArg = (blockArg == opBlockArg);
+ }
+ break;
+ }
+ // Only process nested paths (2+ components). Single-component paths
+ // for direct fields are handled above.
+ if (!rootedAtMapArg || compPathReversed.size() < 2)
+ continue;
+ builder.setInsertionPoint(op);
+ llvm::SmallVector<int64_t> indexPath;
+ mlir::Type curTy = underlyingType;
+ mlir::Value coordRef = op.getVarPtr();
+ bool validPath = true;
+ for (llvm::StringRef compName : llvm::reverse(compPathReversed)) {
+ auto recTy = mlir::dyn_cast<fir::RecordType>(curTy);
+ if (!recTy) {
+ validPath = false;
+ break;
+ }
+ int32_t idx = recTy.getFieldIndex(compName);
+ if (idx < 0) {
+ validPath = false;
+ break;
+ }
+ indexPath.push_back(idx);
+ mlir::Type memTy = recTy.getType(idx);
+ fir::IntOrValue idxConst =
+ mlir::IntegerAttr::get(builder.getI32Type(), idx);
+ coordRef = fir::CoordinateOp::create(
+ builder, op.getLoc(), builder.getRefType(memTy), coordRef,
+ llvm::SmallVector<fir::IntOrValue, 1>{idxConst});
+ curTy = memTy;
+ }
+ if (!validPath)
+ continue;
+ if (auto finalRefTy =
+ mlir::dyn_cast<fir::ReferenceType>(coordRef.getType())) {
+ mlir::Type eleTy = finalRefTy.getElementType();
+ if (fir::isAllocatableType(eleTy)) {
+ if (!containsPath(seenIndexPaths, indexPath)) {
+ seenIndexPaths.emplace_back(indexPath.begin(), indexPath.end());
+ appendMemberMapIfNew(op, builder, op.getLoc(), coordRef,
+ indexPath, compPathReversed.front(),
+ newMapOpsForFields, newMemberIndexPaths);
+ }
+ }
+ }
}
if (newMapOpsForFields.empty())
return mlir::WalkResult::advance();
- op.getMembersMutable().append(newMapOpsForFields);
+ // Deduplicate by index path to avoid emitting duplicate members for
+ // the same component. Use a set-based key to keep this near O(n).
+ llvm::SmallVector<mlir::Value> dedupMapOps;
+ llvm::SmallVector<llvm::SmallVector<int64_t>> dedupIndexPaths;
+ llvm::StringSet<> seenKeys;
+ for (auto [i, mapOp] : llvm::enumerate(newMapOpsForFields)) {
+ const auto &path = newMemberIndexPaths[i];
+ llvm::SmallString<64> key;
+ buildPathKey(path, key);
+ if (seenKeys.contains(key))
+ continue;
+ seenKeys.insert(key);
+ dedupMapOps.push_back(mapOp);
+ dedupIndexPaths.emplace_back(path.begin(), path.end());
+ }
+ op.getMembersMutable().append(dedupMapOps);
llvm::SmallVector<llvm::SmallVector<int64_t>> newMemberIndices;
- mlir::ArrayAttr oldMembersIdxAttr = op.getMembersIndexAttr();
-
- if (oldMembersIdxAttr)
- for (mlir::Attribute indexList : oldMembersIdxAttr) {
+ if (mlir::ArrayAttr oldAttr = op.getMembersIndexAttr())
+ for (mlir::Attribute indexList : oldAttr) {
llvm::SmallVector<int64_t> listVec;
for (mlir::Attribute index : mlir::cast<mlir::ArrayAttr>(indexList))
@@ -796,10 +944,8 @@ class MapInfoFinalizationPass
newMemberIndices.emplace_back(std::move(listVec));
}
-
- for (int64_t newFieldIdx : fieldIndicies)
- newMemberIndices.emplace_back(
- llvm::SmallVector<int64_t>(1, newFieldIdx));
+ for (auto &path : dedupIndexPaths)
+ newMemberIndices.emplace_back(path);
op.setMembersIndexAttr(builder.create2DI64ArrayAttr(newMemberIndices));
op.setPartialMap(true);
diff --git a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
index a7e4723..00fdb5a 100644
--- a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
+++ b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
@@ -682,10 +682,11 @@ mlir::LLVM::DITypeAttr DebugTypeGenerator::convertPointerLikeType(
static mlir::StringAttr getBasicTypeName(mlir::MLIRContext *context,
llvm::StringRef baseName,
unsigned bitSize) {
- std::string name(baseName.str());
+ std::ostringstream oss;
+ oss << baseName.str();
if (bitSize != 32)
- name += "*" + std::to_string(bitSize / 8);
- return mlir::StringAttr::get(context, name);
+ oss << "(kind=" << (bitSize / 8) << ")";
+ return mlir::StringAttr::get(context, oss.str());
}
mlir::LLVM::DITypeAttr
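
The effect of the renaming on emitted DWARF basic-type names (bitSize/8 is the Fortran kind; 32-bit default kinds keep the bare name):

    // getBasicTypeName(ctx, "real", 32)    => "real"
    // getBasicTypeName(ctx, "real", 64)    => "real(kind=8)"     (was "real*8")
    // getBasicTypeName(ctx, "integer", 64) => "integer(kind=8)"  (was "integer*8")
    // getBasicTypeName(ctx, "logical", 8)  => "logical(kind=1)"  (was "logical*1")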
diff --git a/flang/lib/Parser/parsing.cpp b/flang/lib/Parser/parsing.cpp
index 8a8c6ef..2df6881 100644
--- a/flang/lib/Parser/parsing.cpp
+++ b/flang/lib/Parser/parsing.cpp
@@ -85,6 +85,7 @@ const SourceFile *Parsing::Prescan(const std::string &path, Options options) {
if (options.features.IsEnabled(LanguageFeature::OpenACC) ||
(options.prescanAndReformat && noneOfTheAbove)) {
prescanner.AddCompilerDirectiveSentinel("$acc");
+ prescanner.AddCompilerDirectiveSentinel("@acc");
}
if (options.features.IsEnabled(LanguageFeature::OpenMP) ||
(options.prescanAndReformat && noneOfTheAbove)) {
diff --git a/flang/lib/Parser/prescan.cpp b/flang/lib/Parser/prescan.cpp
index 865c149..66e5b2c 100644
--- a/flang/lib/Parser/prescan.cpp
+++ b/flang/lib/Parser/prescan.cpp
@@ -147,6 +147,11 @@ void Prescanner::Statement() {
directiveSentinel_[4] == '\0') {
// CUDA conditional compilation line.
condOffset = 5;
+ } else if (directiveSentinel_[0] == '@' && directiveSentinel_[1] == 'a' &&
+ directiveSentinel_[2] == 'c' && directiveSentinel_[3] == 'c' &&
+ directiveSentinel_[4] == '\0') {
+ // OpenACC conditional compilation line.
+ condOffset = 5;
}
if (condOffset && !preprocessingOnly_) {
at_ += *condOffset, column_ += *condOffset;
diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp
index 1049a6d2..7b88100 100644
--- a/flang/lib/Semantics/check-declarations.cpp
+++ b/flang/lib/Semantics/check-declarations.cpp
@@ -1189,7 +1189,8 @@ void CheckHelper::CheckObjectEntity(
}
} else if (!subpDetails && symbol.owner().kind() != Scope::Kind::Module &&
symbol.owner().kind() != Scope::Kind::MainProgram &&
- symbol.owner().kind() != Scope::Kind::BlockConstruct) {
+ symbol.owner().kind() != Scope::Kind::BlockConstruct &&
+ symbol.owner().kind() != Scope::Kind::OpenACCConstruct) {
messages_.Say(
"ATTRIBUTES(%s) may apply only to module, host subprogram, block, or device subprogram data"_err_en_US,
parser::ToUpperCaseLetters(common::EnumToString(attr)));
diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp
index e224e06..1f059f747 100644
--- a/flang/lib/Semantics/check-omp-structure.cpp
+++ b/flang/lib/Semantics/check-omp-structure.cpp
@@ -1361,9 +1361,19 @@ void OmpStructureChecker::Enter(const parser::OpenMPDeclareSimdConstruct &x) {
return;
}
+ auto isValidSymbol{[](const Symbol *sym) {
+ if (IsProcedure(*sym) || IsFunction(*sym)) {
+ return true;
+ }
+ if (const Symbol *owner{GetScopingUnit(sym->owner()).symbol()}) {
+ return IsProcedure(*owner) || IsFunction(*owner);
+ }
+ return false;
+ }};
+
const parser::OmpArgument &arg{args.v.front()};
if (auto *sym{GetArgumentSymbol(arg)}) {
- if (!IsProcedure(*sym) && !IsFunction(*sym)) {
+ if (!isValidSymbol(sym)) {
auto &msg{context_.Say(arg.source,
"The name '%s' should refer to a procedure"_err_en_US, sym->name())};
if (sym->test(Symbol::Flag::Implicit)) {
diff --git a/flang/lib/Semantics/openmp-utils.cpp b/flang/lib/Semantics/openmp-utils.cpp
index 35b7718..a8ec4d6 100644
--- a/flang/lib/Semantics/openmp-utils.cpp
+++ b/flang/lib/Semantics/openmp-utils.cpp
@@ -41,6 +41,24 @@
namespace Fortran::semantics::omp {
using namespace Fortran::parser::omp;
+const Scope &GetScopingUnit(const Scope &scope) {
+ const Scope *iter{&scope};
+ for (; !iter->IsTopLevel(); iter = &iter->parent()) {
+ switch (iter->kind()) {
+ case Scope::Kind::BlockConstruct:
+ case Scope::Kind::BlockData:
+ case Scope::Kind::DerivedType:
+ case Scope::Kind::MainProgram:
+ case Scope::Kind::Module:
+ case Scope::Kind::Subprogram:
+ return *iter;
+ default:
+ break;
+ }
+ }
+ return *iter;
+}
+
SourcedActionStmt GetActionStmt(const parser::ExecutionPartConstruct *x) {
if (x == nullptr) {
return SourcedActionStmt{};
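
GetScopingUnit, now shared with the OpenMP structure checker, returns the nearest enclosing scope whose kind is a Fortran scoping unit; an illustrative trace over a hypothetical scope chain:

    // Scope chain:  Module M  >  Subprogram S  >  BlockConstruct B  >  <other>
    //   GetScopingUnit(<other>) -> B    (first parent with a listed kind)
    //   GetScopingUnit(B)       -> B
    //   GetScopingUnit(S)       -> S
    // If no listed kind occurs, the top-level scope itself is returned.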
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp
index bd7b8ac..624b890 100644
--- a/flang/lib/Semantics/resolve-directives.cpp
+++ b/flang/lib/Semantics/resolve-directives.cpp
@@ -328,6 +328,11 @@ public:
return false;
}
+ bool Pre(const parser::AccClause::UseDevice &x) {
+ ResolveAccObjectList(x.v, Symbol::Flag::AccUseDevice);
+ return false;
+ }
+
void Post(const parser::Name &);
private:
@@ -379,24 +384,6 @@ public:
explicit OmpAttributeVisitor(SemanticsContext &context)
: DirectiveAttributeVisitor(context) {}
- static const Scope &scopingUnit(const Scope &scope) {
- const Scope *iter{&scope};
- for (; !iter->IsTopLevel(); iter = &iter->parent()) {
- switch (iter->kind()) {
- case Scope::Kind::BlockConstruct:
- case Scope::Kind::BlockData:
- case Scope::Kind::DerivedType:
- case Scope::Kind::MainProgram:
- case Scope::Kind::Module:
- case Scope::Kind::Subprogram:
- return *iter;
- default:
- break;
- }
- }
- return *iter;
- }
-
template <typename A> void Walk(const A &x) { parser::Walk(x, *this); }
template <typename A> bool Pre(const A &) { return true; }
template <typename A> void Post(const A &) {}
@@ -2303,14 +2290,17 @@ void OmpAttributeVisitor::CheckPerfectNestAndRectangularLoop(
}
auto checkPerfectNest = [&, this]() {
- auto blockSize = block.size();
- if (blockSize <= 1)
+ if (block.empty())
return;
+ auto last = block.end();
+ --last;
- if (parser::Unwrap<parser::ContinueStmt>(x))
- blockSize -= 1;
+ // A trailing CONTINUE is not considered part of the loop body
+ if (parser::Unwrap<parser::ContinueStmt>(*last))
+ --last;
- if (blockSize <= 1)
+ // In a perfectly nested loop, the nested loop must be the only statement
+ if (last == block.begin())
return;
// Non-perfectly nested loop
@@ -3086,8 +3076,8 @@ void OmpAttributeVisitor::ResolveOmpDesignator(
checkScope = ompFlag == Symbol::Flag::OmpExecutableAllocateDirective;
}
if (checkScope) {
- if (scopingUnit(GetContext().scope) !=
- scopingUnit(symbol->GetUltimate().owner())) {
+ if (omp::GetScopingUnit(GetContext().scope) !=
+ omp::GetScopingUnit(symbol->GetUltimate().owner())) {
context_.Say(designator.source, // 2.15.3
"List items must be declared in the same scoping unit in which the %s directive appears"_err_en_US,
parser::ToUpperCaseLetters(
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index d1150a9..5041a6a 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -1387,6 +1387,8 @@ private:
// Create scopes for OpenACC constructs
class AccVisitor : public virtual DeclarationVisitor {
public:
+ explicit AccVisitor(SemanticsContext &context) : context_{context} {}
+
void AddAccSourceRange(const parser::CharBlock &);
static bool NeedsScope(const parser::OpenACCBlockConstruct &);
@@ -1395,6 +1397,7 @@ public:
void Post(const parser::OpenACCBlockConstruct &);
bool Pre(const parser::OpenACCCombinedConstruct &);
void Post(const parser::OpenACCCombinedConstruct &);
+ bool Pre(const parser::AccClause::UseDevice &x);
bool Pre(const parser::AccBeginBlockDirective &x) {
AddAccSourceRange(x.source);
return true;
@@ -1430,6 +1433,11 @@ public:
void Post(const parser::AccBeginLoopDirective &x) {
messageHandler().set_currStmtSource(std::nullopt);
}
+
+ void CopySymbolWithDevice(const parser::Name *name);
+
+private:
+ SemanticsContext &context_;
};
bool AccVisitor::NeedsScope(const parser::OpenACCBlockConstruct &x) {
@@ -1459,6 +1467,60 @@ bool AccVisitor::Pre(const parser::OpenACCBlockConstruct &x) {
return true;
}
+void AccVisitor::CopySymbolWithDevice(const parser::Name *name) {
+ // When CUDA Fortran is enabled together with OpenACC, new
+  // symbols are created for the ones appearing in the use_device
+ // clause. These new symbols have the CUDA Fortran device
+ // attribute.
+ if (context_.languageFeatures().IsEnabled(common::LanguageFeature::CUDA)) {
+ name->symbol = currScope().CopySymbol(*name->symbol);
+ if (auto *object{name->symbol->detailsIf<ObjectEntityDetails>()}) {
+ object->set_cudaDataAttr(common::CUDADataAttr::Device);
+ }
+ }
+}
+
+bool AccVisitor::Pre(const parser::AccClause::UseDevice &x) {
+ for (const auto &accObject : x.v.v) {
+ common::visit(
+ common::visitors{
+ [&](const parser::Designator &designator) {
+ if (const auto *name{
+ semantics::getDesignatorNameIfDataRef(designator)}) {
+ Symbol *prev{currScope().FindSymbol(name->source)};
+ if (prev != name->symbol) {
+ name->symbol = prev;
+ }
+ CopySymbolWithDevice(name);
+ } else {
+ if (const auto *dataRef{
+ std::get_if<parser::DataRef>(&designator.u)}) {
+ using ElementIndirection =
+ common::Indirection<parser::ArrayElement>;
+ if (auto *ind{std::get_if<ElementIndirection>(&dataRef->u)}) {
+ const parser::ArrayElement &arrayElement{ind->value()};
+ Walk(arrayElement.subscripts);
+ const parser::DataRef &base{arrayElement.base};
+ if (auto *name{std::get_if<parser::Name>(&base.u)}) {
+ Symbol *prev{currScope().FindSymbol(name->source)};
+ if (prev != name->symbol) {
+ name->symbol = prev;
+ }
+ CopySymbolWithDevice(name);
+ }
+ }
+ }
+ }
+ },
+ [&](const parser::Name &name) {
+ // TODO: common block in use_device?
+ },
+ },
+ accObject.u);
+ }
+ return false;
+}
+
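
The observable effect, exercised by the acc-host-data-cuda-device.f90 test added below: because the copied use_device symbols carry CUDADataAttr::Device, generic resolution inside the construct picks the device-specific interface:

    // Inside "!$acc host_data use_device(a)", "call doit(a)" resolves to
    // __device_sub (the copied symbol for 'a' is marked device); outside the
    // construct the same call resolves to __host_sub.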
void AccVisitor::Post(const parser::OpenACCBlockConstruct &x) {
if (NeedsScope(x)) {
PopScope();
@@ -2038,7 +2100,8 @@ public:
ResolveNamesVisitor(
SemanticsContext &context, ImplicitRulesMap &rules, Scope &top)
- : BaseVisitor{context, *this, rules}, topScope_{top} {
+ : BaseVisitor{context, *this, rules}, AccVisitor(context),
+ topScope_{top} {
PushScope(top);
}
diff --git a/flang/test/Driver/complex-range.f90 b/flang/test/Driver/complex-range.f90
index e5a1ba9..575fa04 100644
--- a/flang/test/Driver/complex-range.f90
+++ b/flang/test/Driver/complex-range.f90
@@ -15,6 +15,83 @@
! RUN: not %flang -### -fcomplex-arithmetic=foo -c %s 2>&1 \
! RUN: | FileCheck %s --check-prefix=ERR
+! RUN: %flang -### -ffast-math -c %s 2>&1 \
+! RUN: | FileCheck %s --check-prefix=BASIC
+
+! RUN: %flang -### -fno-fast-math -c %s 2>&1 \
+! RUN: | FileCheck %s --check-prefix=RANGE
+
+! RUN: %flang -### -Werror -ffast-math -fno-fast-math -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=RANGE %s
+
+! RUN: %flang -### -ffast-math -fcomplex-arithmetic=full -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=FULL,ARITH-FULL-OVERRIDING,FAST-OVERRIDDEN %s
+
+! RUN: %flang -### -ffast-math -fcomplex-arithmetic=improved -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=IMPRVD,ARITH-IMPROVED-OVERRIDING,FAST-OVERRIDDEN %s
+
+! RUN: %flang -### -Werror -ffast-math -fcomplex-arithmetic=basic -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=BASIC %s
+
+! RUN: %flang -### -Werror -fno-fast-math -ffast-math -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=BASIC %s
+
+! RUN: %flang -### -Werror -fno-fast-math -fcomplex-arithmetic=full -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=FULL %s
+
+! RUN: %flang -### -Werror -fno-fast-math -fcomplex-arithmetic=improved -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=IMPRVD %s
+
+! RUN: %flang -### -Werror -fno-fast-math -fcomplex-arithmetic=basic -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=BASIC %s
+
+! RUN: %flang -### -fcomplex-arithmetic=full -ffast-math -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=BASIC,FAST-OVERRIDING,ARITH-FULL-OVERRIDDEN %s
+
+! RUN: %flang -### -Werror -fcomplex-arithmetic=full -fno-fast-math -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=RANGE %s
+
+! RUN: %flang -### -Werror -fcomplex-arithmetic=full -fcomplex-arithmetic=improved -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=IMPRVD %s
+
+! RUN: %flang -### -Werror -fcomplex-arithmetic=full -fcomplex-arithmetic=basic -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=BASIC %s
+
+! RUN: %flang -### -fcomplex-arithmetic=improved -ffast-math -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=BASIC,FAST-OVERRIDING,ARITH-IMPROVED-OVERRIDDEN %s
+
+! RUN: %flang -### -fcomplex-arithmetic=improved -fno-fast-math -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=RANGE,NOFAST-OVERRIDING,ARITH-IMPROVED-OVERRIDDEN %s
+
+! RUN: %flang -### -Werror -fcomplex-arithmetic=improved -fcomplex-arithmetic=full -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=FULL %s
+
+! RUN: %flang -### -Werror -fcomplex-arithmetic=improved -fcomplex-arithmetic=basic -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=BASIC %s
+
+! RUN: %flang -### -Werror -fcomplex-arithmetic=basic -ffast-math -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=BASIC %s
+
+! RUN: %flang -### -fcomplex-arithmetic=basic -fno-fast-math -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=RANGE,NOFAST-OVERRIDING,ARITH-BASIC-OVERRIDDEN %s
+
+! RUN: %flang -### -Werror -fcomplex-arithmetic=basic -fcomplex-arithmetic=full -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=FULL %s
+
+! RUN: %flang -### -Werror -fcomplex-arithmetic=basic -fcomplex-arithmetic=improved -c %s 2>&1 \
+! RUN: | FileCheck --check-prefixes=IMPRVD %s
+
+
+! FAST-OVERRIDING: warning: '-ffast-math' sets complex range to "basic"
+! NOFAST-OVERRIDING: warning: '-fno-fast-math' sets complex range to "none"
+! ARITH-FULL-OVERRIDING: warning: '-fcomplex-arithmetic=full' sets complex range to "full"
+! ARITH-IMPROVED-OVERRIDING: warning: '-fcomplex-arithmetic=improved' sets complex range to "improved"
+
+! FAST-OVERRIDDEN: overriding the setting of "basic" that was implied by '-ffast-math' [-Woverriding-complex-range]
+! ARITH-FULL-OVERRIDDEN: overriding the setting of "full" that was implied by '-fcomplex-arithmetic=full' [-Woverriding-complex-range]
+! ARITH-IMPROVED-OVERRIDDEN: overriding the setting of "improved" that was implied by '-fcomplex-arithmetic=improved' [-Woverriding-complex-range]
+! ARITH-BASIC-OVERRIDDEN: overriding the setting of "basic" that was implied by '-fcomplex-arithmetic=basic' [-Woverriding-complex-range]
+
! RANGE-NOT: -complex-range=
! FULL: -complex-range=full
! IMPRVD: -complex-range=improved
diff --git a/flang/test/Driver/fast-real-mod.f90 b/flang/test/Driver/fast-real-mod.f90
new file mode 100644
index 0000000..4ea9b26
--- /dev/null
+++ b/flang/test/Driver/fast-real-mod.f90
@@ -0,0 +1,7 @@
+! RUN: %flang -fno-fast-real-mod -### -c %s 2>&1 | FileCheck %s -check-prefix CHECK-NO-FAST-REAL-MOD
+
+! CHECK-NO-FAST-REAL-MOD: "-fno-fast-real-mod"
+
+program test
+ ! nothing to be done in here
+end program test
diff --git a/flang/test/Integration/debug-complex-1.f90 b/flang/test/Integration/debug-complex-1.f90
index 48ea029..1d70140 100644
--- a/flang/test/Integration/debug-complex-1.f90
+++ b/flang/test/Integration/debug-complex-1.f90
@@ -17,8 +17,8 @@ contains
end program
! CHECK-DAG: ![[C4:.*]] = !DIBasicType(name: "complex", size: 64, encoding: DW_ATE_complex_float)
-! CHECK-DAG: ![[C8:.*]] = !DIBasicType(name: "complex*8", size: 128, encoding: DW_ATE_complex_float)
-! CHECK-DAG: ![[C16:.*]] = !DIBasicType(name: "complex*16", size: 256, encoding: DW_ATE_complex_float)
+! CHECK-DAG: ![[C8:.*]] = !DIBasicType(name: "complex(kind=8)", size: 128, encoding: DW_ATE_complex_float)
+! CHECK-DAG: ![[C16:.*]] = !DIBasicType(name: "complex(kind=16)", size: 256, encoding: DW_ATE_complex_float)
! CHECK-DAG: !DILocalVariable(name: "c4"{{.*}}type: ![[C4]])
! CHECK-DAG: !DILocalVariable(name: "c8"{{.*}}type: ![[C8]])
! CHECK-DAG: !DILocalVariable(name: "r"{{.*}}type: ![[C16]])
diff --git a/flang/test/Integration/debug-local-var-2.f90 b/flang/test/Integration/debug-local-var-2.f90
index 93659a5..e95263e 100644
--- a/flang/test/Integration/debug-local-var-2.f90
+++ b/flang/test/Integration/debug-local-var-2.f90
@@ -40,11 +40,11 @@ program mn
! BOTH-DAG: ![[MAIN:.*]] = distinct !DISubprogram(name: "MN", {{.*}})
! BOTH-DAG: ![[TYI32:.*]] = !DIBasicType(name: "integer", size: 32, encoding: DW_ATE_signed)
-! BOTH-DAG: ![[TYI64:.*]] = !DIBasicType(name: "integer*8", size: 64, encoding: DW_ATE_signed)
-! BOTH-DAG: ![[TYL8:.*]] = !DIBasicType(name: "logical*1", size: 8, encoding: DW_ATE_boolean)
+! BOTH-DAG: ![[TYI64:.*]] = !DIBasicType(name: "integer(kind=8)", size: 64, encoding: DW_ATE_signed)
+! BOTH-DAG: ![[TYL8:.*]] = !DIBasicType(name: "logical(kind=1)", size: 8, encoding: DW_ATE_boolean)
! BOTH-DAG: ![[TYL32:.*]] = !DIBasicType(name: "logical", size: 32, encoding: DW_ATE_boolean)
! BOTH-DAG: ![[TYR32:.*]] = !DIBasicType(name: "real", size: 32, encoding: DW_ATE_float)
-! BOTH-DAG: ![[TYR64:.*]] = !DIBasicType(name: "real*8", size: 64, encoding: DW_ATE_float)
+! BOTH-DAG: ![[TYR64:.*]] = !DIBasicType(name: "real(kind=8)", size: 64, encoding: DW_ATE_float)
! BOTH-DAG: ![[I4]] = !DILocalVariable(name: "i4", scope: ![[MAIN]], file: !{{.*}}, line: [[@LINE+6]], type: ![[TYI32]])
! BOTH-DAG: ![[I8]] = !DILocalVariable(name: "i8", scope: ![[MAIN]], file: !{{.*}}, line: [[@LINE+6]], type: ![[TYI64]])
diff --git a/flang/test/Lower/Intrinsics/fast-real-mod.f90 b/flang/test/Lower/Intrinsics/fast-real-mod.f90
new file mode 100644
index 0000000..f80f720
--- /dev/null
+++ b/flang/test/Lower/Intrinsics/fast-real-mod.f90
@@ -0,0 +1,83 @@
+! RUN: %flang_fc1 -ffast-math -emit-mlir -o - %s | FileCheck %s --check-prefixes=CHECK%if target=x86_64{{.*}} %{,CHECK-KIND10%}%if flang-supports-f128-math %{,CHECK-KIND16%}
+! RUN: %flang_fc1 -ffast-math -fno-fast-real-mod -emit-mlir -o - %s | FileCheck %s --check-prefixes=CHECK-NFRM%if target=x86_64{{.*}} %{,CHECK-NFRM-KIND10%}%if flang-supports-f128-math %{,CHECK-NFRM-KIND16%}
+
+! TODO: check that the fir.no_fast_real_mod attribute is absent in the plain -ffast-math run
+! CHECK-NFRM: module attributes {{{.*}}fir.no_fast_real_mod = true{{.*}}}
+
+! CHECK-LABEL: @_QPmod_real4
+subroutine mod_real4(r, a, p)
+ implicit none
+ real(kind=4) :: r, a, p
+! CHECK: %[[A:.*]] = fir.declare{{.*}}a"
+! CHECK: %[[P:.*]] = fir.declare{{.*}}p"
+! CHECK: %[[R:.*]] = fir.declare{{.*}}r"
+! CHECK: %[[A_LOAD:.*]] = fir.load %[[A]]
+! CHECK: %[[P_LOAD:.*]] = fir.load %[[P]]
+! CHECK: %[[DIV:.*]] = arith.divf %[[A_LOAD]], %[[P_LOAD]] fastmath<fast> : f32
+! CHECK: %[[CV1:.*]] = fir.convert %[[DIV]] : (f32) -> si32
+! CHECK: %[[CV2:.*]] = fir.convert %[[CV1]] : (si32) -> f32
+! CHECK: %[[MUL:.*]] = arith.mulf %[[CV2]], %[[P_LOAD]] fastmath<fast> : f32
+! CHECK: %[[SUB:.*]] = arith.subf %[[A_LOAD]], %[[MUL]] fastmath<fast> : f32
+! CHECK: fir.store %[[SUB]] to %[[R]] : !fir.ref<f32>
+! CHECK-NFRM: fir.call @_FortranAModReal4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (f32, f32, !fir.ref<i8>, i32) -> f32
+ r = mod(a, p)
+end subroutine mod_real4
+
+! CHECK-LABEL: @_QPmod_real8
+subroutine mod_real8(r, a, p)
+ implicit none
+ real(kind=8) :: r, a, p
+! CHECK: %[[A:.*]] = fir.declare{{.*}}a"
+! CHECK: %[[P:.*]] = fir.declare{{.*}}p"
+! CHECK: %[[R:.*]] = fir.declare{{.*}}r"
+! CHECK: %[[A_LOAD:.*]] = fir.load %[[A]]
+! CHECK: %[[P_LOAD:.*]] = fir.load %[[P]]
+! CHECK: %[[DIV:.*]] = arith.divf %[[A_LOAD]], %[[P_LOAD]] fastmath<fast> : f64
+! CHECK: %[[CV1:.*]] = fir.convert %[[DIV]] : (f64) -> si64
+! CHECK: %[[CV2:.*]] = fir.convert %[[CV1]] : (si64) -> f64
+! CHECK: %[[MUL:.*]] = arith.mulf %[[CV2]], %[[P_LOAD]] fastmath<fast> : f64
+! CHECK: %[[SUB:.*]] = arith.subf %[[A_LOAD]], %[[MUL]] fastmath<fast> : f64
+! CHECK: fir.store %[[SUB]] to %[[R]] : !fir.ref<f64>
+! CHECK-NFRM: fir.call @_FortranAModReal8(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (f64, f64, !fir.ref<i8>, i32) -> f64
+ r = mod(a, p)
+end subroutine mod_real8
+
+! CHECK-LABEL: @_QPmod_real10
+subroutine mod_real10(r, a, p)
+ implicit none
+ integer, parameter :: kind10 = merge(10, 4, selected_real_kind(p=18).eq.10)
+ real(kind=kind10) :: r, a, p
+! CHECK-KIND10: %[[A:.*]] = fir.declare{{.*}}a"
+! CHECK-KIND10: %[[P:.*]] = fir.declare{{.*}}p"
+! CHECK-KIND10: %[[R:.*]] = fir.declare{{.*}}r"
+! CHECK-KIND10: %[[A_LOAD:.*]] = fir.load %[[A]]
+! CHECK-KIND10: %[[P_LOAD:.*]] = fir.load %[[P]]
+! CHECK-KIND10: %[[DIV:.*]] = arith.divf %[[A_LOAD]], %[[P_LOAD]] fastmath<fast> : f80
+! CHECK-KIND10: %[[CV1:.*]] = fir.convert %[[DIV]] : (f80) -> si80
+! CHECK-KIND10: %[[CV2:.*]] = fir.convert %[[CV1]] : (si80) -> f80
+! CHECK-KIND10: %[[MUL:.*]] = arith.mulf %[[CV2]], %[[P_LOAD]] fastmath<fast> : f80
+! CHECK-KIND10: %[[SUB:.*]] = arith.subf %[[A_LOAD]], %[[MUL]] fastmath<fast> : f80
+! CHECK-KIND10: fir.store %[[SUB]] to %[[R]] : !fir.ref<f80>
+! CHECK-NFRM-KIND10: fir.call @_FortranAModReal10(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (f80, f80, !fir.ref<i8>, i32) -> f80
+ r = mod(a, p)
+end subroutine mod_real10
+
+! CHECK-LABEL: @_QPmod_real16
+subroutine mod_real16(r, a, p)
+ implicit none
+ integer, parameter :: kind16 = merge(16, 4, selected_real_kind(p=33).eq.16)
+ real(kind=kind16) :: r, a, p
+! CHECK-KIND16: %[[A:.*]] = fir.declare{{.*}}a"
+! CHECK-KIND16: %[[P:.*]] = fir.declare{{.*}}p"
+! CHECK-KIND16: %[[R:.*]] = fir.declare{{.*}}r"
+! CHECK-KIND16: %[[A_LOAD:.*]] = fir.load %[[A]]
+! CHECK-KIND16: %[[P_LOAD:.*]] = fir.load %[[P]]
+! CHECK-KIND16: %[[DIV:.*]] = arith.divf %[[A_LOAD]], %[[P_LOAD]] fastmath<fast> : f128
+! CHECK-KIND16: %[[CV1:.*]] = fir.convert %[[DIV]] : (f128) -> si128
+! CHECK-KIND16: %[[CV2:.*]] = fir.convert %[[CV1]] : (si128) -> f128
+! CHECK-KIND16: %[[MUL:.*]] = arith.mulf %[[CV2]], %[[P_LOAD]] fastmath<fast> : f128
+! CHECK-KIND16: %[[SUB:.*]] = arith.subf %[[A_LOAD]], %[[MUL]] fastmath<fast> : f128
+! CHECK-KIND16: fir.store %[[SUB]] to %[[R]] : !fir.ref<f128>
+! CHECK-NFRM-KIND16: fir.call @_FortranAModReal16(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (f128, f128, !fir.ref<i8>, i32) -> f128
+ r = mod(a, p)
+end subroutine mod_real16
diff --git a/flang/test/Lower/OpenACC/acc-host-data-cuda-device.f90 b/flang/test/Lower/OpenACC/acc-host-data-cuda-device.f90
new file mode 100644
index 0000000..da034ad
--- /dev/null
+++ b/flang/test/Lower/OpenACC/acc-host-data-cuda-device.f90
@@ -0,0 +1,43 @@
+
+! RUN: bbc -fopenacc -fcuda -emit-hlfir %s -o - | FileCheck %s
+
+module m
+
+interface doit
+subroutine __device_sub(a)
+ real(4), device, intent(in) :: a(:,:,:)
+ !dir$ ignore_tkr(c) a
+end
+subroutine __host_sub(a)
+ real(4), intent(in) :: a(:,:,:)
+ !dir$ ignore_tkr(c) a
+end
+end interface
+end module
+
+program testex1
+integer, parameter :: ntimes = 10
+integer, parameter :: ni=128
+integer, parameter :: nj=256
+integer, parameter :: nk=64
+real(4), dimension(ni,nj,nk) :: a
+
+!$acc enter data copyin(a)
+
+block; use m
+!$acc host_data use_device(a)
+do nt = 1, ntimes
+ call doit(a)
+end do
+!$acc end host_data
+end block
+
+block; use m
+do nt = 1, ntimes
+ call doit(a)
+end do
+end block
+end
+
+! CHECK: fir.call @_QP__device_sub
+! CHECK: fir.call @_QP__host_sub
diff --git a/flang/test/Lower/OpenMP/declare-mapper.f90 b/flang/test/Lower/OpenMP/declare-mapper.f90
index 8a98c68..3d4d0da 100644
--- a/flang/test/Lower/OpenMP/declare-mapper.f90
+++ b/flang/test/Lower/OpenMP/declare-mapper.f90
@@ -6,6 +6,7 @@
! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-3.f90 -o - | FileCheck %t/omp-declare-mapper-3.f90
! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-4.f90 -o - | FileCheck %t/omp-declare-mapper-4.f90
! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-5.f90 -o - | FileCheck %t/omp-declare-mapper-5.f90
+! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=51 %t/omp-declare-mapper-6.f90 -o - | FileCheck %t/omp-declare-mapper-6.f90
!--- omp-declare-mapper-1.f90
subroutine declare_mapper_1
@@ -262,3 +263,41 @@ contains
!$omp end target
end subroutine
end program declare_mapper_5
+
+!--- omp-declare-mapper-6.f90
+subroutine declare_mapper_nested_parent
+ type :: inner_t
+ real, allocatable :: deep_arr(:)
+ end type inner_t
+
+ type, abstract :: base_t
+ real, allocatable :: base_arr(:)
+ type(inner_t) :: inner
+ end type base_t
+
+ type, extends(base_t) :: real_t
+ real, allocatable :: real_arr(:)
+ end type real_t
+
+ !$omp declare mapper (custommapper : real_t :: t) map(tofrom: t%base_arr, t%real_arr)
+ ! CHECK: omp.declare_mapper @{{.*custommapper}}
+ ! CHECK-DAG: omp.map.info {{.*}} {name = "t%base_t%base_arr"}
+ ! CHECK-DAG: omp.map.info {{.*}} {name = "t%real_arr"}
+ ! CHECK: omp.declare_mapper.info
+
+ type(real_t) :: r
+
+ allocate(r%base_arr(10))
+ allocate(r%inner%deep_arr(10))
+ allocate(r%real_arr(10))
+ r%base_arr = 1.0
+ r%inner%deep_arr = 4.0
+ r%real_arr = 0.0
+
+ ! Check implicit maps for deep nested allocatable payloads not covered by mapper
+ ! CHECK-DAG: omp.map.info {{.*}} {name = "r.deep_arr.implicit_map"}
+ ! CHECK: omp.target
+ !$omp target map(mapper(custommapper), tofrom: r)
+ r%real_arr = r%base_arr(1) + r%inner%deep_arr(1)
+ !$omp end target
+end subroutine declare_mapper_nested_parent
diff --git a/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90 b/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90
new file mode 100644
index 0000000..fea7a8b
--- /dev/null
+++ b/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90
@@ -0,0 +1,19 @@
+! RUN: bbc -fopenmp -emit-hlfir %s -o - | FileCheck %s
+
+program wsloop_collapse_continue
+ integer i, j
+
+! CHECK: omp.wsloop {{.*}} {
+! CHECK: omp.loop_nest ({{.*}}) : i32 = ({{.*}}) to ({{.*}}) inclusive step ({{.*}}) collapse(2) {
+ !$omp do collapse(2)
+ do 50 i = 1, 42
+ do 51 j = 1, 84
+! CHECK: fir.call @_FortranAioOutputInteger32(
+ print *, i
+! CHECK: fir.call @_FortranAioOutputInteger32(
+ print *, j
+ 51 continue
+ 50 continue
+ !$omp end do
+
+end program wsloop_collapse_continue
diff --git a/flang/test/Semantics/OpenACC/acc-sentinel.f90 b/flang/test/Semantics/OpenACC/acc-sentinel.f90
new file mode 100644
index 0000000..d34d97e
--- /dev/null
+++ b/flang/test/Semantics/OpenACC/acc-sentinel.f90
@@ -0,0 +1,14 @@
+! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenacc
+
+subroutine test_sentinel()
+! Test for error since we currently do not have an OpenACC module upstream.
+!ERROR: Cannot parse module file for module 'openacc': Source file 'openacc.mod' was not found
+ !@acc use openacc
+ integer :: i
+
+ !$acc parallel loop
+ do i = 1, 10
+ end do
+ !$acc end parallel
+
+end subroutine
diff --git a/flang/test/Semantics/OpenMP/declare-simd.f90 b/flang/test/Semantics/OpenMP/declare-simd.f90
index ceed2c2..bb259b8 100644
--- a/flang/test/Semantics/OpenMP/declare-simd.f90
+++ b/flang/test/Semantics/OpenMP/declare-simd.f90
@@ -19,4 +19,9 @@ end
subroutine f01
end
+integer function f02
+!Ok, expect no diagnostics
+!$omp declare_simd(f02)
+end
+
end module
diff --git a/flang/test/Semantics/OpenMP/do08.f90 b/flang/test/Semantics/OpenMP/do08.f90
index bb3c1d0c..5143dff 100644
--- a/flang/test/Semantics/OpenMP/do08.f90
+++ b/flang/test/Semantics/OpenMP/do08.f90
@@ -61,7 +61,6 @@ program omp
!$omp end do
- !ERROR: Canonical loop nest must be perfectly nested.
!ERROR: The value of the parameter in the COLLAPSE or ORDERED clause must not be larger than the number of nested loops following the construct.
!$omp do collapse(3)
do 60 i=2,200,2
diff --git a/flang/test/Semantics/OpenMP/do13.f90 b/flang/test/Semantics/OpenMP/do13.f90
index 8f7844f..6e9d1dd 100644
--- a/flang/test/Semantics/OpenMP/do13.f90
+++ b/flang/test/Semantics/OpenMP/do13.f90
@@ -59,7 +59,6 @@ program omp
!$omp end do
- !ERROR: Canonical loop nest must be perfectly nested.
!ERROR: The value of the parameter in the COLLAPSE or ORDERED clause must not be larger than the number of nested loops following the construct.
!$omp do collapse(3)
do 60 i=1,10
diff --git a/flang/test/Transforms/debug-complex-1.fir b/flang/test/Transforms/debug-complex-1.fir
index 7a288fe..6e2c6c5 100644
--- a/flang/test/Transforms/debug-complex-1.fir
+++ b/flang/test/Transforms/debug-complex-1.fir
@@ -26,9 +26,9 @@ module {
#loc3 = loc("./simple.f90":8:1)
#loc4 = loc("./simple.f90":11:1)
-// CHECK-DAG: #[[CMPX8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex*8", sizeInBits = 128, encoding = DW_ATE_complex_float>
+// CHECK-DAG: #[[CMPX8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex(kind=8)", sizeInBits = 128, encoding = DW_ATE_complex_float>
// CHECK-DAG: #[[CMPX4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex", sizeInBits = 64, encoding = DW_ATE_complex_float>
-// CHECK-DAG: #[[CMPX16:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex*16", sizeInBits = 256, encoding = DW_ATE_complex_float>
+// CHECK-DAG: #[[CMPX16:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex(kind=16)", sizeInBits = 256, encoding = DW_ATE_complex_float>
// CHECK-DAG: #[[TY1:.*]] = #llvm.di_subroutine_type<{{.*}}types = #[[CMPX8]], #[[CMPX4]]>
// CHECK-DAG: #[[TY2:.*]] = #llvm.di_subroutine_type<{{.*}}types = #[[CMPX16]], #[[CMPX4]]>
diff --git a/flang/test/Transforms/debug-derived-type-1.fir b/flang/test/Transforms/debug-derived-type-1.fir
index 672b6cf..22832b6 100644
--- a/flang/test/Transforms/debug-derived-type-1.fir
+++ b/flang/test/Transforms/debug-derived-type-1.fir
@@ -45,12 +45,12 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr<272>, d
// CHECK-DAG: #[[INT_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer", sizeInBits = 32, encoding = DW_ATE_signed>
-// CHECK-DAG: #[[INT8_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer*8", sizeInBits = 64, encoding = DW_ATE_signed>
+// CHECK-DAG: #[[INT8_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer(kind=8)", sizeInBits = 64, encoding = DW_ATE_signed>
// CHECK-DAG: #[[REAL4_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real", sizeInBits = 32, encoding = DW_ATE_float>
// CHECK-DAG: #[[CMX8_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex", sizeInBits = 64, encoding = DW_ATE_complex_float>
// CHECK-DAG: #[[CMX_ARR:.*]] = #llvm.di_composite_type<tag = DW_TAG_array_type, baseType = #[[CMX8_TY:.*]]{{.*}}>
-// CHECK-DAG: #[[LOG_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "logical*1", sizeInBits = 8, encoding = DW_ATE_boolean>
-// CHECK-DAG: #[[REAL8_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real*8", sizeInBits = 64, encoding = DW_ATE_float>
+// CHECK-DAG: #[[LOG_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "logical(kind=1)", sizeInBits = 8, encoding = DW_ATE_boolean>
+// CHECK-DAG: #[[REAL8_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real(kind=8)", sizeInBits = 64, encoding = DW_ATE_float>
// CHECK-DAG: #[[STR_TY:.*]] = #llvm.di_string_type
// CHECK-DAG: #[[MOD:.*]] = #llvm.di_module<{{.*}}name = "m_employee"{{.*}}>
// CHECK-DAG: #[[MOD1:.*]] = #llvm.di_module<{{.*}}name = "t1"{{.*}}>
diff --git a/flang/test/Transforms/debug-fn-info.fir b/flang/test/Transforms/debug-fn-info.fir
index d82cef1..e42beb1 100644
--- a/flang/test/Transforms/debug-fn-info.fir
+++ b/flang/test/Transforms/debug-fn-info.fir
@@ -64,10 +64,10 @@ module {
#loc4 = loc("test2.f90":53:22)
-// CHECK-DAG: #[[INT8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer*8", sizeInBits = 64, encoding = DW_ATE_signed>
+// CHECK-DAG: #[[INT8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer(kind=8)", sizeInBits = 64, encoding = DW_ATE_signed>
// CHECK-DAG: #[[INT4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer", sizeInBits = 32, encoding = DW_ATE_signed>
-// CHECK-DAG: #[[REAL8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real*8", sizeInBits = 64, encoding = DW_ATE_float>
-// CHECK-DAG: #[[LOG1:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "logical*1", sizeInBits = 8, encoding = DW_ATE_boolean>
+// CHECK-DAG: #[[REAL8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real(kind=8)", sizeInBits = 64, encoding = DW_ATE_float>
+// CHECK-DAG: #[[LOG1:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "logical(kind=1)", sizeInBits = 8, encoding = DW_ATE_boolean>
// CHECK-DAG: #[[REAL4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real", sizeInBits = 32, encoding = DW_ATE_float>
// CHECK-DAG: #[[LOG4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "logical", sizeInBits = 32, encoding = DW_ATE_boolean>
// CHECK: #[[TY0:.*]] = #llvm.di_subroutine_type<callingConvention = DW_CC_program, types = #di_null_type>
diff --git a/flang/test/Transforms/debug-local-var.fir b/flang/test/Transforms/debug-local-var.fir
index 466f79c..d39017e 100644
--- a/flang/test/Transforms/debug-local-var.fir
+++ b/flang/test/Transforms/debug-local-var.fir
@@ -71,10 +71,10 @@ module {
#loc15 = loc("test.f90":21:24)
#loc16 = loc("test.f90":22:5)
-// CHECK-DAG: #[[INT8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer*8", sizeInBits = 64, encoding = DW_ATE_signed>
+// CHECK-DAG: #[[INT8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer(kind=8)", sizeInBits = 64, encoding = DW_ATE_signed>
// CHECK-DAG: #[[INT4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer", sizeInBits = 32, encoding = DW_ATE_signed>
-// CHECK-DAG: #[[REAL8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real*8", sizeInBits = 64, encoding = DW_ATE_float>
-// CHECK-DAG: #[[LOG1:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "logical*1", sizeInBits = 8, encoding = DW_ATE_boolean>
+// CHECK-DAG: #[[REAL8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real(kind=8)", sizeInBits = 64, encoding = DW_ATE_float>
+// CHECK-DAG: #[[LOG1:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "logical(kind=1)", sizeInBits = 8, encoding = DW_ATE_boolean>
// CHECK-DAG: #[[REAL4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real", sizeInBits = 32, encoding = DW_ATE_float>
// CHECK-DAG: #[[LOG4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "logical", sizeInBits = 32, encoding = DW_ATE_boolean>
// CHECK-DAG: #[[MAIN:.*]] = #llvm.di_subprogram<{{.*}}name = "mn"{{.*}}>
diff --git a/flang/test/Transforms/debug-ref-type.fir b/flang/test/Transforms/debug-ref-type.fir
index 2164a40..daffa29 100644
--- a/flang/test/Transforms/debug-ref-type.fir
+++ b/flang/test/Transforms/debug-ref-type.fir
@@ -5,6 +5,6 @@ module {
}
#loc1 = loc("test.f90":5:1)
-// CHECK: #[[INT8_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer*1", sizeInBits = 8, encoding = DW_ATE_signed>
+// CHECK: #[[INT8_TY:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer(kind=1)", sizeInBits = 8, encoding = DW_ATE_signed>
// CHECK: #[[REF_TY:.*]] = #llvm.di_derived_type<tag = DW_TAG_pointer_type, name = "", baseType = #[[INT8_TY]]{{.*}}>
// CHECK: #llvm.di_subroutine_type<{{.*}}types = #[[REF_TY]], #[[INT8_TY]]>
diff --git a/flang/test/Transforms/debug-tuple-type.fir b/flang/test/Transforms/debug-tuple-type.fir
index b865d49..73a0733 100644
--- a/flang/test/Transforms/debug-tuple-type.fir
+++ b/flang/test/Transforms/debug-tuple-type.fir
@@ -5,7 +5,7 @@ module {
func.func private @_FortranAioOutputDerivedType(!fir.ref<tuple<>>)
}
-// CHECK: #[[F64:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real*8", sizeInBits = 64, encoding = DW_ATE_float>
+// CHECK: #[[F64:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real(kind=8)", sizeInBits = 64, encoding = DW_ATE_float>
// CHECK: #[[CU:.*]] = #llvm.di_compile_unit<{{.*}}>
// CHECK: #[[DTY1:.*]] = #llvm.di_derived_type<tag = DW_TAG_member, name = "", baseType = #[[F64]], sizeInBits = 64, alignInBits = {{.*}}>
// CHECK: #[[DTY2:.*]] = #llvm.di_derived_type<tag = DW_TAG_member, name = "", baseType = #[[F64]], sizeInBits = 64, alignInBits = {{.*}}, offsetInBits = {{.*}}>
diff --git a/flang/test/Transforms/debug-vector-type.fir b/flang/test/Transforms/debug-vector-type.fir
index cfb97ea..9e41d90 100644
--- a/flang/test/Transforms/debug-vector-type.fir
+++ b/flang/test/Transforms/debug-vector-type.fir
@@ -2,22 +2,22 @@
module {
func.func private @foo1(%arg0: !fir.vector<20:bf16>)
-// CHECK-DAG: #[[F16:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real*2", sizeInBits = 16, encoding = DW_ATE_float>
-// CHECK-DAG: #llvm.di_composite_type<tag = DW_TAG_array_type, name = "vector real*2 (20)", baseType = #[[F16]], flags = Vector, sizeInBits = 320, elements = #llvm.di_subrange<count = 20 : i64>>
+// CHECK-DAG: #[[F16:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real(kind=2)", sizeInBits = 16, encoding = DW_ATE_float>
+// CHECK-DAG: #llvm.di_composite_type<tag = DW_TAG_array_type, name = "vector real(kind=2) (20)", baseType = #[[F16]], flags = Vector, sizeInBits = 320, elements = #llvm.di_subrange<count = 20 : i64>>
func.func private @foo2(%arg0: !fir.vector<30:f32>)
// CHECK-DAG: #[[F32:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real", sizeInBits = 32, encoding = DW_ATE_float>
// CHECK-DAG: #llvm.di_composite_type<tag = DW_TAG_array_type, name = "vector real (30)", baseType = #[[F32]], flags = Vector, sizeInBits = 960, elements = #llvm.di_subrange<count = 30 : i64>>
func.func private @foo3(%arg0: !fir.vector<10:f64>)
-// CHECK-DAG: #[[F64:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real*8", sizeInBits = 64, encoding = DW_ATE_float>
-// CHECK-DAG: #llvm.di_composite_type<tag = DW_TAG_array_type, name = "vector real*8 (10)", baseType = #[[F64]], flags = Vector, sizeInBits = 640, elements = #llvm.di_subrange<count = 10 : i64>>
+// CHECK-DAG: #[[F64:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real(kind=8)", sizeInBits = 64, encoding = DW_ATE_float>
+// CHECK-DAG: #llvm.di_composite_type<tag = DW_TAG_array_type, name = "vector real(kind=8) (10)", baseType = #[[F64]], flags = Vector, sizeInBits = 640, elements = #llvm.di_subrange<count = 10 : i64>>
func.func private @foo4(%arg0: !fir.vector<5:i32>)
// CHECK-DAG: #[[I32:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer", sizeInBits = 32, encoding = DW_ATE_signed>
// CHECK-DAG: #llvm.di_composite_type<tag = DW_TAG_array_type, name = "vector integer (5)", baseType = #[[I32]], flags = Vector, sizeInBits = 160, elements = #llvm.di_subrange<count = 5 : i64>>
func.func private @foo5(%arg0: !fir.vector<2:i64>)
-// CHECK-DAG: #[[I64:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer*8", sizeInBits = 64, encoding = DW_ATE_signed>
-// CHECK-DAG: #llvm.di_composite_type<tag = DW_TAG_array_type, name = "vector integer*8 (2)", baseType = #[[I64]], flags = Vector, sizeInBits = 128, elements = #llvm.di_subrange<count = 2 : i64>>
+// CHECK-DAG: #[[I64:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer(kind=8)", sizeInBits = 64, encoding = DW_ATE_signed>
+// CHECK-DAG: #llvm.di_composite_type<tag = DW_TAG_array_type, name = "vector integer(kind=8) (2)", baseType = #[[I64]], flags = Vector, sizeInBits = 128, elements = #llvm.di_subrange<count = 2 : i64>>
}
diff --git a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp
index e3e3647..10a7ddf 100644
--- a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp
+++ b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp
@@ -644,3 +644,87 @@ TEST_F(FIRBuilderTest, genArithIntegerOverflow) {
auto op4_ioff = op4_iofi.getOverflowAttr().getValue();
EXPECT_EQ(op4_ioff, nsw);
}
+
+TEST_F(FIRBuilderTest, getDescriptorWithNewBaseAddress) {
+ auto builder = getBuilder();
+ auto loc = builder.getUnknownLoc();
+
+ // Build an input fir.box for a 1-D array of i64 with constant extent 10.
+ auto i64Ty = builder.getI64Type();
+ auto seqTy = fir::SequenceType::get({10}, i64Ty);
+ auto refArrTy = fir::ReferenceType::get(seqTy);
+ auto ptrTy = fir::PointerType::get(seqTy);
+ auto boxTy = fir::BoxType::get(ptrTy);
+ // Create an undef box descriptor value (descriptor contents are unspecified).
+ mlir::Value inputBox = fir::UndefOp::create(builder, loc, boxTy);
+
+ // New base address (same element type and properties).
+ mlir::Value addr2 = fir::UndefOp::create(builder, loc, refArrTy);
+
+ mlir::Value newBox = fir::factory::getDescriptorWithNewBaseAddress(
+ builder, loc, inputBox, addr2);
+
+ // The returned descriptor must have the same type as the input box.
+ EXPECT_EQ(newBox.getType(), inputBox.getType());
+
+ // It must be constructed by an embox using the new base address.
+ ASSERT_TRUE(llvm::isa_and_nonnull<fir::EmboxOp>(newBox.getDefiningOp()));
+ auto embox = llvm::dyn_cast<fir::EmboxOp>(newBox.getDefiningOp());
+ EXPECT_EQ(embox.getMemref(), addr2);
+
+  // The shape should be derived from the input box; expect a fir.shape_shift
+  // whose extent comes from a fir.box_dims reading the original input box.
+ mlir::Value shape = embox.getShape();
+ ASSERT_TRUE(shape);
+ ASSERT_TRUE(llvm::isa_and_nonnull<fir::ShapeShiftOp>(shape.getDefiningOp()));
+ auto shapeOp = llvm::dyn_cast<fir::ShapeShiftOp>(shape.getDefiningOp());
+ ASSERT_EQ(shapeOp.getExtents().size(), 1u);
+ mlir::Value extent0 = shapeOp.getExtents()[0];
+ ASSERT_TRUE(llvm::isa_and_nonnull<fir::BoxDimsOp>(extent0.getDefiningOp()));
+ auto dimOp = llvm::dyn_cast<fir::BoxDimsOp>(extent0.getDefiningOp());
+ EXPECT_EQ(dimOp.getVal(), inputBox);
+
+ // Also verify the origin comes from a BoxDims on the same input box.
+ ASSERT_EQ(shapeOp.getOrigins().size(), 1u);
+ mlir::Value origin0 = shapeOp.getOrigins()[0];
+ ASSERT_TRUE(llvm::isa_and_nonnull<fir::BoxDimsOp>(origin0.getDefiningOp()));
+ auto lbOp = llvm::dyn_cast<fir::BoxDimsOp>(origin0.getDefiningOp());
+ EXPECT_EQ(lbOp.getVal(), inputBox);
+}
+
+TEST_F(FIRBuilderTest, getDescriptorWithNewBaseAddress_PolymorphicScalar) {
+ auto builder = getBuilder();
+ auto loc = builder.getUnknownLoc();
+
+ // Build a polymorphic scalar: fir.class<ptr<!fir.type<rec>>>.
+ auto recTy = fir::RecordType::get(builder.getContext(), "poly_rec");
+ auto ptrRecTy = fir::PointerType::get(recTy);
+ auto classTy = fir::ClassType::get(ptrRecTy);
+
+ // Input descriptor is an undefined fir.class value.
+ mlir::Value inputBox = fir::UndefOp::create(builder, loc, classTy);
+
+ // New base address of the same element type (reference to the record).
+ auto refRecTy = fir::ReferenceType::get(recTy);
+ mlir::Value newAddr = fir::UndefOp::create(builder, loc, refRecTy);
+
+ mlir::Value newBox = fir::factory::getDescriptorWithNewBaseAddress(
+ builder, loc, inputBox, newAddr);
+
+ // Same descriptor type must be preserved.
+ EXPECT_EQ(newBox.getType(), inputBox.getType());
+
+ // Must be an embox using the new base address and carrying the original box
+ // as mold.
+ ASSERT_TRUE(llvm::isa_and_nonnull<fir::EmboxOp>(newBox.getDefiningOp()));
+ auto embox = llvm::dyn_cast<fir::EmboxOp>(newBox.getDefiningOp());
+ EXPECT_EQ(embox.getMemref(), newAddr);
+
+ // Polymorphic scalar should have no shape operand.
+ mlir::Value shape = embox.getShape();
+ EXPECT_TRUE(shape == nullptr);
+
+ // The type descriptor/mold must be the original input box.
+ mlir::Value tdesc = embox.getSourceBox();
+ EXPECT_EQ(tdesc, inputBox);
+}
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index ae8deab..4824684 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -325,6 +325,8 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.unistd.dup2
libc.src.unistd.dup3
libc.src.unistd.execve
+ # Disabled while SYS_faccessat2 is unavailable on the buildbot.
+ # libc.src.unistd.faccessat
libc.src.unistd.fchdir
libc.src.unistd.fpathconf
libc.src.unistd.fsync
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index bf2ad4a..87b78a33 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -331,6 +331,7 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.unistd.dup2
libc.src.unistd.dup3
libc.src.unistd.execve
+ libc.src.unistd.faccessat
libc.src.unistd.fchdir
libc.src.unistd.fpathconf
libc.src.unistd.fsync
diff --git a/libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp b/libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp
index 097e619..2fabbba 100644
--- a/libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp
+++ b/libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp
@@ -44,6 +44,10 @@
// greater than 50% chance for each character to end the string, making the odds
// of getting long numbers very low.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ if (size < 2) // Needs at least one byte for the base and one byte for the
+ // string.
+ return 0;
+
uint8_t *container = new uint8_t[size + 1];
if (!container)
__builtin_trap();
diff --git a/libc/include/llvm-libc-macros/linux/fcntl-macros.h b/libc/include/llvm-libc-macros/linux/fcntl-macros.h
index aec8a0d..74d406f 100644
--- a/libc/include/llvm-libc-macros/linux/fcntl-macros.h
+++ b/libc/include/llvm-libc-macros/linux/fcntl-macros.h
@@ -61,6 +61,9 @@
// Allow empty relative pathname.
#define AT_EMPTY_PATH 0x1000
+// Perform access checks using the effective user and group IDs.
+#define AT_EACCESS 0x200
+
// Values of SYS_fcntl commands.
#define F_DUPFD 0
#define F_GETFD 1
diff --git a/libc/include/sys/syscall.h.def b/libc/include/sys/syscall.h.def
index 6d74cc6..60e5024 100644
--- a/libc/include/sys/syscall.h.def
+++ b/libc/include/sys/syscall.h.def
@@ -309,6 +309,10 @@
#define SYS_faccessat __NR_faccessat
#endif
+#ifdef __NR_faccessat2
+#define SYS_faccessat2 __NR_faccessat2
+#endif
+
#ifdef __NR_fadvise64
#define SYS_fadvise64 __NR_fadvise64
#endif
diff --git a/libc/include/unistd.yaml b/libc/include/unistd.yaml
index 3ba3ec7..2ff86ea 100644
--- a/libc/include/unistd.yaml
+++ b/libc/include/unistd.yaml
@@ -96,6 +96,15 @@ functions:
- type: const char *
- type: __exec_argv_t
- type: __exec_envp_t
+ - name: faccessat
+ standards:
+ - POSIX
+ return_type: int
+ arguments:
+ - type: int
+ - type: const char *
+ - type: int
+ - type: int
- name: fchdir
standards:
- POSIX
diff --git a/libc/shared/math.h b/libc/shared/math.h
index cccd6a3..4b2a0d8 100644
--- a/libc/shared/math.h
+++ b/libc/shared/math.h
@@ -46,6 +46,7 @@
#include "math/exp10f.h"
#include "math/exp10f16.h"
#include "math/exp10m1f.h"
+#include "math/exp10m1f16.h"
#include "math/expf.h"
#include "math/expf16.h"
#include "math/frexpf.h"
diff --git a/libc/shared/math/exp10m1f16.h b/libc/shared/math/exp10m1f16.h
new file mode 100644
index 0000000..5f18f29
--- /dev/null
+++ b/libc/shared/math/exp10m1f16.h
@@ -0,0 +1,29 @@
+//===-- Shared exp10m1f16 function ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SHARED_MATH_EXP10M1F16_H
+#define LLVM_LIBC_SHARED_MATH_EXP10M1F16_H
+
+#include "include/llvm-libc-macros/float16-macros.h"
+#include "shared/libc_common.h"
+
+#ifdef LIBC_TYPES_HAS_FLOAT16
+
+#include "src/__support/math/exp10m1f16.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace shared {
+
+using math::exp10m1f16;
+
+} // namespace shared
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_TYPES_HAS_FLOAT16
+
+#endif // LLVM_LIBC_SHARED_MATH_EXP10M1F16_H
diff --git a/libc/src/__support/macros/attributes.h b/libc/src/__support/macros/attributes.h
index 145aa3b..d5ff028 100644
--- a/libc/src/__support/macros/attributes.h
+++ b/libc/src/__support/macros/attributes.h
@@ -81,4 +81,14 @@ LIBC_THREAD_MODE_EXTERNAL.
#define LIBC_HAS_VECTOR_TYPE 0
#endif
+#if __has_attribute(no_sanitize)
+// Disable regular and hardware-assisted ASan for functions that may
+// intentionally make out-of-bounds accesses. Disable TSan as well, as it
+// detects out-of-bounds accesses to heap memory.
+#define LIBC_NO_SANITIZE_OOB_ACCESS \
+ __attribute__((no_sanitize("address", "hwaddress", "thread")))
+#else
+#define LIBC_NO_SANITIZE_OOB_ACCESS
+#endif
+
#endif // LLVM_LIBC_SRC___SUPPORT_MACROS_ATTRIBUTES_H
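The new LIBC_NO_SANITIZE_OOB_ACCESS macro exists because the strlen-style routines changed below read memory in aligned word- or vector-sized chunks, intentionally overrunning the terminator within its page. A minimal standalone sketch of that pattern (illustrative only, not the libc implementation):

    #include <cstddef>
    #include <cstdint>

    // May read up to 7 bytes past the terminator, but never touches an
    // aligned 8-byte word unless the preceding words were zero-free, so it
    // stays within the terminator's page. Sanitizers still flag such reads,
    // hence the attribute.
    __attribute__((no_sanitize("address", "hwaddress", "thread")))
    static size_t wide_strlen(const char *s) {
      const char *p = s;
      while (reinterpret_cast<uintptr_t>(p) % sizeof(uint64_t) != 0) {
        if (*p == '\0')
          return static_cast<size_t>(p - s);
        ++p;
      }
      const uint64_t *w = reinterpret_cast<const uint64_t *>(p);
      // A word contains a zero byte iff (v - 0x01..01) & ~v & 0x80..80 != 0.
      while (((*w - 0x0101010101010101ULL) & ~*w & 0x8080808080808080ULL) == 0)
        ++w;
      p = reinterpret_cast<const char *>(w);
      while (*p != '\0')
        ++p;
      return static_cast<size_t>(p - s);
    }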
diff --git a/libc/src/__support/math/CMakeLists.txt b/libc/src/__support/math/CMakeLists.txt
index 84c1b15..98f9bb42 100644
--- a/libc/src/__support/math/CMakeLists.txt
+++ b/libc/src/__support/math/CMakeLists.txt
@@ -499,6 +499,23 @@ add_header_library(
)
add_header_library(
+ exp10m1f16
+ HDRS
+ exp10m1f16.h
+ DEPENDS
+ .exp10f16_utils
+ libc.src.__support.FPUtil.cast
+ libc.src.__support.FPUtil.except_value_utils
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.polyeval
+ libc.src.__support.FPUtil.rounding_mode
+ libc.src.__support.macros.optimization
+ libc.src.__support.macros.properties.cpu_features
+)
+
+add_header_library(
erff
HDRS
erff.h
diff --git a/libc/src/__support/math/exp10m1f16.h b/libc/src/__support/math/exp10m1f16.h
new file mode 100644
index 0000000..6367a85
--- /dev/null
+++ b/libc/src/__support/math/exp10m1f16.h
@@ -0,0 +1,185 @@
+//===-- Implementation header for exp10m1f16 --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F16_H
+#define LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F16_H
+
+#include "include/llvm-libc-macros/float16-macros.h"
+
+#ifdef LIBC_TYPES_HAS_FLOAT16
+
+#include "exp10f16_utils.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/cast.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/rounding_mode.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h"
+#include "src/__support/macros/properties/cpu_features.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace math {
+
+LIBC_INLINE static constexpr float16 exp10m1f16(float16 x) {
+
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ constexpr fputil::ExceptValues<float16, 3> EXP10M1F16_EXCEPTS_LO = {{
+ // (input, RZ output, RU offset, RD offset, RN offset)
+ // x = 0x1.5c4p-4, exp10m1f16(x) = 0x1.bacp-3 (RZ)
+ {0x2d71U, 0x32ebU, 1U, 0U, 0U},
+ // x = -0x1.5ep-13, exp10m1f16(x) = -0x1.92cp-12 (RZ)
+ {0x8978U, 0x8e4bU, 0U, 1U, 0U},
+ // x = -0x1.e2p-10, exp10m1f16(x) = -0x1.14cp-8 (RZ)
+ {0x9788U, 0x9c53U, 0U, 1U, 0U},
+ }};
+
+#ifdef LIBC_TARGET_CPU_HAS_FMA_FLOAT
+ constexpr size_t N_EXP10M1F16_EXCEPTS_HI = 3;
+#else
+ constexpr size_t N_EXP10M1F16_EXCEPTS_HI = 6;
+#endif
+
+ constexpr fputil::ExceptValues<float16, N_EXP10M1F16_EXCEPTS_HI>
+ EXP10M1F16_EXCEPTS_HI = {{
+ // (input, RZ output, RU offset, RD offset, RN offset)
+ // x = 0x1.8f4p-2, exp10m1f16(x) = 0x1.744p+0 (RZ)
+ {0x363dU, 0x3dd1U, 1U, 0U, 0U},
+ // x = 0x1.95cp-2, exp10m1f16(x) = 0x1.7d8p+0 (RZ)
+ {0x3657U, 0x3df6U, 1U, 0U, 0U},
+ // x = 0x1.d04p-2, exp10m1f16(x) = 0x1.d7p+0 (RZ)
+ {0x3741U, 0x3f5cU, 1U, 0U, 1U},
+#ifndef LIBC_TARGET_CPU_HAS_FMA_FLOAT
+ // x = 0x1.0cp+1, exp10m1f16(x) = 0x1.ec4p+6 (RZ)
+ {0x4030U, 0x57b1U, 1U, 0U, 1U},
+ // x = 0x1.1b8p+1, exp10m1f16(x) = 0x1.45cp+7 (RZ)
+ {0x406eU, 0x5917U, 1U, 0U, 1U},
+ // x = 0x1.2f4p+2, exp10m1f16(x) = 0x1.ab8p+15 (RZ)
+ {0x44bdU, 0x7aaeU, 1U, 0U, 1U},
+#endif
+ }};
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+
+ using FPBits = fputil::FPBits<float16>;
+ FPBits x_bits(x);
+
+ uint16_t x_u = x_bits.uintval();
+ uint16_t x_abs = x_u & 0x7fffU;
+
+ // When |x| <= 2^(-3), or |x| >= 11 * log10(2), or x is NaN.
+ if (LIBC_UNLIKELY(x_abs <= 0x3000U || x_abs >= 0x429fU)) {
+ // exp10m1(NaN) = NaN
+ if (x_bits.is_nan()) {
+ if (x_bits.is_signaling_nan()) {
+ fputil::raise_except_if_required(FE_INVALID);
+ return FPBits::quiet_nan().get_val();
+ }
+
+ return x;
+ }
+
+ // When x >= 16 * log10(2).
+ if (x_u >= 0x44d1U && x_bits.is_pos()) {
+ // exp10m1(+inf) = +inf
+ if (x_bits.is_inf())
+ return FPBits::inf().get_val();
+
+ switch (fputil::quick_get_round()) {
+ case FE_TONEAREST:
+ case FE_UPWARD:
+ fputil::set_errno_if_required(ERANGE);
+ fputil::raise_except_if_required(FE_OVERFLOW | FE_INEXACT);
+ return FPBits::inf().get_val();
+ default:
+ return FPBits::max_normal().get_val();
+ }
+ }
+
+ // When x < -11 * log10(2).
+ if (x_u > 0xc29fU) {
+ // exp10m1(-inf) = -1
+ if (x_bits.is_inf())
+ return FPBits::one(Sign::NEG).get_val();
+
+ // When x >= -0x1.ce4p+1, round(10^x - 1, HP, RN) = -0x1.ffcp-1.
+ if (x_u <= 0xc339U) {
+ return fputil::round_result_slightly_down(
+ fputil::cast<float16>(-0x1.ffcp-1));
+ }
+
+ // When x < -0x1.ce4p+1, round(10^x - 1, HP, RN) = -1.
+ switch (fputil::quick_get_round()) {
+ case FE_TONEAREST:
+ case FE_DOWNWARD:
+ return FPBits::one(Sign::NEG).get_val();
+ default:
+ return fputil::cast<float16>(-0x1.ffcp-1);
+ }
+ }
+
+ // When |x| <= 2^(-3).
+ if (x_abs <= 0x3000U) {
+ if (LIBC_UNLIKELY(x_abs == 0))
+ return x;
+
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ if (auto r = EXP10M1F16_EXCEPTS_LO.lookup(x_u);
+ LIBC_UNLIKELY(r.has_value()))
+ return r.value();
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+
+ float xf = x;
+ // Degree-5 minimax polynomial generated by Sollya with the following
+ // commands:
+ // > display = hexadecimal;
+ // > P = fpminimax((10^x - 1)/x, 4, [|SG...|], [-2^-3, 2^-3]);
+ // > x * P;
+ return fputil::cast<float16>(
+ xf * fputil::polyeval(xf, 0x1.26bb1cp+1f, 0x1.5351c8p+1f,
+ 0x1.04704p+1f, 0x1.2ce084p+0f, 0x1.14a6bep-1f));
+ }
+ }
+
+ // When x is 1, 2, or 3. These are hard-to-round cases with exact results.
+ // 10^4 - 1 = 9'999 is not exactly representable as a float16, but luckily the
+ // polynomial approximation gives the correct result for x = 4 in all
+ // rounding modes.
+ if (LIBC_UNLIKELY((x_u & ~(0x3c00U | 0x4000U | 0x4200U | 0x4400U)) == 0)) {
+ switch (x_u) {
+ case 0x3c00U: // x = 1.0f16
+ return fputil::cast<float16>(9.0);
+ case 0x4000U: // x = 2.0f16
+ return fputil::cast<float16>(99.0);
+ case 0x4200U: // x = 3.0f16
+ return fputil::cast<float16>(999.0);
+ }
+ }
+
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ if (auto r = EXP10M1F16_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
+ return r.value();
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+
+ // exp10(x) = exp2((hi + mid) * log2(10)) * exp10(lo)
+ auto [exp2_hi_mid, exp10_lo] = exp10_range_reduction(x);
+  // exp10m1(x) = exp2((hi + mid) * log2(10)) * exp10(lo) - 1
+ return fputil::cast<float16>(
+ fputil::multiply_add(exp2_hi_mid, exp10_lo, -1.0f));
+}
+
+} // namespace math
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_TYPES_HAS_FLOAT16
+
+#endif // LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F16_H
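As a reference point for the final branch, the identity exp10m1(x) = exp2((hi + mid) * log2(10)) * exp10(lo) - 1 can be sanity-checked against a plain double-precision formulation; a hedged sketch (reference semantics only, not the float16 algorithm above):

    #include <cmath>
    #include <cstdio>

    // 10^x - 1 computed as expm1(x * ln 10); expm1 retains precision when
    // 10^x is close to 1, which is why exp10m1f16 special-cases small |x|
    // with a polynomial instead of computing exp10(x) and subtracting 1.
    static double exp10m1_ref(double x) {
      return std::expm1(x * 2.302585092994045684); // ln(10)
    }

    int main() {
      for (double x : {-0.125, 0.125, 1.0, 2.0, 3.0})
        std::printf("exp10m1(%g) = %.17g\n", x, exp10m1_ref(x));
      return 0; // x = 1, 2, 3 give ~9, ~99, ~999 (up to double rounding)
    }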
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 8074a39..99c1b08 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -1603,18 +1603,7 @@ add_entrypoint_object(
HDRS
../exp10m1f16.h
DEPENDS
- libc.hdr.errno_macros
- libc.hdr.fenv_macros
- libc.src.__support.FPUtil.cast
- libc.src.__support.FPUtil.except_value_utils
- libc.src.__support.FPUtil.fenv_impl
- libc.src.__support.FPUtil.fp_bits
- libc.src.__support.FPUtil.multiply_add
- libc.src.__support.FPUtil.polyeval
- libc.src.__support.FPUtil.rounding_mode
- libc.src.__support.macros.optimization
- libc.src.__support.macros.properties.cpu_features
- libc.src.__support.math.exp10f16_utils
+ libc.src.__support.math.exp10m1f16
)
add_entrypoint_object(
diff --git a/libc/src/math/generic/exp10m1f16.cpp b/libc/src/math/generic/exp10m1f16.cpp
index 6c2fdbe..8a3c4ab 100644
--- a/libc/src/math/generic/exp10m1f16.cpp
+++ b/libc/src/math/generic/exp10m1f16.cpp
@@ -7,166 +7,12 @@
//===----------------------------------------------------------------------===//
#include "src/math/exp10m1f16.h"
-#include "hdr/errno_macros.h"
-#include "hdr/fenv_macros.h"
-#include "src/__support/FPUtil/FEnvImpl.h"
-#include "src/__support/FPUtil/FPBits.h"
-#include "src/__support/FPUtil/PolyEval.h"
-#include "src/__support/FPUtil/cast.h"
-#include "src/__support/FPUtil/except_value_utils.h"
-#include "src/__support/FPUtil/multiply_add.h"
-#include "src/__support/FPUtil/rounding_mode.h"
-#include "src/__support/common.h"
-#include "src/__support/macros/config.h"
-#include "src/__support/macros/optimization.h"
-#include "src/__support/macros/properties/cpu_features.h"
-#include "src/__support/math/exp10f16_utils.h"
+#include "src/__support/math/exp10m1f16.h"
namespace LIBC_NAMESPACE_DECL {
-#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-static constexpr fputil::ExceptValues<float16, 3> EXP10M1F16_EXCEPTS_LO = {{
- // (input, RZ output, RU offset, RD offset, RN offset)
- // x = 0x1.5c4p-4, exp10m1f16(x) = 0x1.bacp-3 (RZ)
- {0x2d71U, 0x32ebU, 1U, 0U, 0U},
- // x = -0x1.5ep-13, exp10m1f16(x) = -0x1.92cp-12 (RZ)
- {0x8978U, 0x8e4bU, 0U, 1U, 0U},
- // x = -0x1.e2p-10, exp10m1f16(x) = -0x1.14cp-8 (RZ)
- {0x9788U, 0x9c53U, 0U, 1U, 0U},
-}};
-
-#ifdef LIBC_TARGET_CPU_HAS_FMA_FLOAT
-static constexpr size_t N_EXP10M1F16_EXCEPTS_HI = 3;
-#else
-static constexpr size_t N_EXP10M1F16_EXCEPTS_HI = 6;
-#endif
-
-static constexpr fputil::ExceptValues<float16, N_EXP10M1F16_EXCEPTS_HI>
- EXP10M1F16_EXCEPTS_HI = {{
- // (input, RZ output, RU offset, RD offset, RN offset)
- // x = 0x1.8f4p-2, exp10m1f16(x) = 0x1.744p+0 (RZ)
- {0x363dU, 0x3dd1U, 1U, 0U, 0U},
- // x = 0x1.95cp-2, exp10m1f16(x) = 0x1.7d8p+0 (RZ)
- {0x3657U, 0x3df6U, 1U, 0U, 0U},
- // x = 0x1.d04p-2, exp10m1f16(x) = 0x1.d7p+0 (RZ)
- {0x3741U, 0x3f5cU, 1U, 0U, 1U},
-#ifndef LIBC_TARGET_CPU_HAS_FMA_FLOAT
- // x = 0x1.0cp+1, exp10m1f16(x) = 0x1.ec4p+6 (RZ)
- {0x4030U, 0x57b1U, 1U, 0U, 1U},
- // x = 0x1.1b8p+1, exp10m1f16(x) = 0x1.45cp+7 (RZ)
- {0x406eU, 0x5917U, 1U, 0U, 1U},
- // x = 0x1.2f4p+2, exp10m1f16(x) = 0x1.ab8p+15 (RZ)
- {0x44bdU, 0x7aaeU, 1U, 0U, 1U},
-#endif
- }};
-#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-
LLVM_LIBC_FUNCTION(float16, exp10m1f16, (float16 x)) {
- using FPBits = fputil::FPBits<float16>;
- FPBits x_bits(x);
-
- uint16_t x_u = x_bits.uintval();
- uint16_t x_abs = x_u & 0x7fffU;
-
- // When |x| <= 2^(-3), or |x| >= 11 * log10(2), or x is NaN.
- if (LIBC_UNLIKELY(x_abs <= 0x3000U || x_abs >= 0x429fU)) {
- // exp10m1(NaN) = NaN
- if (x_bits.is_nan()) {
- if (x_bits.is_signaling_nan()) {
- fputil::raise_except_if_required(FE_INVALID);
- return FPBits::quiet_nan().get_val();
- }
-
- return x;
- }
-
- // When x >= 16 * log10(2).
- if (x_u >= 0x44d1U && x_bits.is_pos()) {
- // exp10m1(+inf) = +inf
- if (x_bits.is_inf())
- return FPBits::inf().get_val();
-
- switch (fputil::quick_get_round()) {
- case FE_TONEAREST:
- case FE_UPWARD:
- fputil::set_errno_if_required(ERANGE);
- fputil::raise_except_if_required(FE_OVERFLOW | FE_INEXACT);
- return FPBits::inf().get_val();
- default:
- return FPBits::max_normal().get_val();
- }
- }
-
- // When x < -11 * log10(2).
- if (x_u > 0xc29fU) {
- // exp10m1(-inf) = -1
- if (x_bits.is_inf())
- return FPBits::one(Sign::NEG).get_val();
-
- // When x >= -0x1.ce4p+1, round(10^x - 1, HP, RN) = -0x1.ffcp-1.
- if (x_u <= 0xc339U) {
- return fputil::round_result_slightly_down(
- fputil::cast<float16>(-0x1.ffcp-1));
- }
-
- // When x < -0x1.ce4p+1, round(10^x - 1, HP, RN) = -1.
- switch (fputil::quick_get_round()) {
- case FE_TONEAREST:
- case FE_DOWNWARD:
- return FPBits::one(Sign::NEG).get_val();
- default:
- return fputil::cast<float16>(-0x1.ffcp-1);
- }
- }
-
- // When |x| <= 2^(-3).
- if (x_abs <= 0x3000U) {
- if (LIBC_UNLIKELY(x_abs == 0))
- return x;
-
-#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
- if (auto r = EXP10M1F16_EXCEPTS_LO.lookup(x_u);
- LIBC_UNLIKELY(r.has_value()))
- return r.value();
-#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-
- float xf = x;
- // Degree-5 minimax polynomial generated by Sollya with the following
- // commands:
- // > display = hexadecimal;
- // > P = fpminimax((10^x - 1)/x, 4, [|SG...|], [-2^-3, 2^-3]);
- // > x * P;
- return fputil::cast<float16>(
- xf * fputil::polyeval(xf, 0x1.26bb1cp+1f, 0x1.5351c8p+1f,
- 0x1.04704p+1f, 0x1.2ce084p+0f, 0x1.14a6bep-1f));
- }
- }
-
- // When x is 1, 2, or 3. These are hard-to-round cases with exact results.
- // 10^4 - 1 = 9'999 is not exactly representable as a float16, but luckily the
- // polynomial approximation gives the correct result for x = 4 in all
- // rounding modes.
- if (LIBC_UNLIKELY((x_u & ~(0x3c00U | 0x4000U | 0x4200U | 0x4400U)) == 0)) {
- switch (x_u) {
- case 0x3c00U: // x = 1.0f16
- return fputil::cast<float16>(9.0);
- case 0x4000U: // x = 2.0f16
- return fputil::cast<float16>(99.0);
- case 0x4200U: // x = 3.0f16
- return fputil::cast<float16>(999.0);
- }
- }
-
-#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
- if (auto r = EXP10M1F16_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value()))
- return r.value();
-#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-
- // exp10(x) = exp2((hi + mid) * log2(10)) * exp10(lo)
- auto [exp2_hi_mid, exp10_lo] = exp10_range_reduction(x);
- // exp10m1(x) = exp2((hi + mid) * log2(lo)) * exp10(lo) - 1
- return fputil::cast<float16>(
- fputil::multiply_add(exp2_hi_mid, exp10_lo, -1.0f));
+ return math::exp10m1f16(x);
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/string/CMakeLists.txt b/libc/src/string/CMakeLists.txt
index b8cdb2a7..83c9564 100644
--- a/libc/src/string/CMakeLists.txt
+++ b/libc/src/string/CMakeLists.txt
@@ -22,6 +22,7 @@ add_header_library(
libc.src.__support.CPP.type_traits
libc.src.__support.CPP.simd
libc.src.__support.common
+ libc.src.__support.macros.attributes
libc.src.string.memory_utils.inline_memcpy
${string_config_options}
)
diff --git a/libc/src/string/memory_utils/aarch64/inline_strlen.h b/libc/src/string/memory_utils/aarch64/inline_strlen.h
index 36fd1aa..87f5ccd 100644
--- a/libc/src/string/memory_utils/aarch64/inline_strlen.h
+++ b/libc/src/string/memory_utils/aarch64/inline_strlen.h
@@ -17,7 +17,7 @@
namespace LIBC_NAMESPACE_DECL {
namespace neon {
-[[gnu::no_sanitize_address]] [[maybe_unused]] LIBC_INLINE static size_t
+[[maybe_unused]] LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static size_t
string_length(const char *src) {
using Vector __attribute__((may_alias)) = uint8x8_t;
diff --git a/libc/src/string/memory_utils/generic/inline_strlen.h b/libc/src/string/memory_utils/generic/inline_strlen.h
index d7435af..69700e8 100644
--- a/libc/src/string/memory_utils/generic/inline_strlen.h
+++ b/libc/src/string/memory_utils/generic/inline_strlen.h
@@ -24,8 +24,7 @@ LIBC_INLINE constexpr cpp::simd_mask<char> shift_mask(cpp::simd_mask<char> m,
return cpp::bit_cast<cpp::simd_mask<char>>(r);
}
-[[clang::no_sanitize("address")]] LIBC_INLINE size_t
-string_length(const char *src) {
+LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE size_t string_length(const char *src) {
constexpr cpp::simd<char> null_byte = cpp::splat('\0');
size_t alignment = alignof(cpp::simd<char>);
diff --git a/libc/src/string/memory_utils/x86_64/inline_strlen.h b/libc/src/string/memory_utils/x86_64/inline_strlen.h
index 739f8c1..9e10d58 100644
--- a/libc/src/string/memory_utils/x86_64/inline_strlen.h
+++ b/libc/src/string/memory_utils/x86_64/inline_strlen.h
@@ -18,12 +18,12 @@ namespace LIBC_NAMESPACE_DECL {
namespace string_length_internal {
// Return a bit-mask with the nth bit set if the nth-byte in block_ptr is zero.
template <typename Vector, typename Mask>
-[[gnu::no_sanitize_address]] LIBC_INLINE static Mask
+LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static Mask
compare_and_mask(const Vector *block_ptr);
template <typename Vector, typename Mask,
decltype(compare_and_mask<Vector, Mask>)>
-[[gnu::no_sanitize_address]] LIBC_INLINE static size_t
+LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static size_t
string_length_vector(const char *src) {
uintptr_t misalign_bytes = reinterpret_cast<uintptr_t>(src) % sizeof(Vector);
diff --git a/libc/src/string/string_utils.h b/libc/src/string/string_utils.h
index 9d636d0..7feef56 100644
--- a/libc/src/string/string_utils.h
+++ b/libc/src/string/string_utils.h
@@ -19,6 +19,7 @@
#include "hdr/types/size_t.h"
#include "src/__support/CPP/bitset.h"
#include "src/__support/CPP/type_traits.h" // cpp::is_same_v
+#include "src/__support/macros/attributes.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
#include "src/string/memory_utils/inline_memcpy.h"
@@ -119,7 +120,7 @@ template <typename T> LIBC_INLINE size_t string_length(const T *src) {
}
template <typename Word>
-[[gnu::no_sanitize_address]] LIBC_INLINE void *
+LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE void *
find_first_character_wide_read(const unsigned char *src, unsigned char ch,
size_t n) {
const unsigned char *char_ptr = src;
diff --git a/libc/src/unistd/CMakeLists.txt b/libc/src/unistd/CMakeLists.txt
index c66a3a4..78c3bf8 100644
--- a/libc/src/unistd/CMakeLists.txt
+++ b/libc/src/unistd/CMakeLists.txt
@@ -56,6 +56,13 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ faccessat
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_OS}.faccessat
+)
+
+add_entrypoint_object(
fchdir
ALIAS
DEPENDS
diff --git a/libc/src/unistd/faccessat.h b/libc/src/unistd/faccessat.h
new file mode 100644
index 0000000..0dc834d
--- /dev/null
+++ b/libc/src/unistd/faccessat.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for faccessat ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_UNISTD_FACCESSAT_H
+#define LLVM_LIBC_SRC_UNISTD_FACCESSAT_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+int faccessat(int fd, const char *path, int amode, int flag);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_UNISTD_FACCESSAT_H
diff --git a/libc/src/unistd/linux/CMakeLists.txt b/libc/src/unistd/linux/CMakeLists.txt
index 2d510f3..dff6ba2 100644
--- a/libc/src/unistd/linux/CMakeLists.txt
+++ b/libc/src/unistd/linux/CMakeLists.txt
@@ -81,6 +81,19 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ faccessat
+ SRCS
+ faccessat.cpp
+ HDRS
+ ../faccessat.h
+ DEPENDS
+ libc.hdr.fcntl_macros
+ libc.include.sys_syscall
+ libc.src.__support.OSUtil.osutil
+ libc.src.errno.errno
+)
+
+add_entrypoint_object(
fchdir
SRCS
fchdir.cpp
diff --git a/libc/src/unistd/linux/access.cpp b/libc/src/unistd/linux/access.cpp
index 55cd6ad..f06eec5 100644
--- a/libc/src/unistd/linux/access.cpp
+++ b/libc/src/unistd/linux/access.cpp
@@ -23,7 +23,7 @@ LLVM_LIBC_FUNCTION(int, access, (const char *path, int mode)) {
int ret = LIBC_NAMESPACE::syscall_impl<int>(SYS_access, path, mode);
#elif defined(SYS_faccessat)
int ret =
- LIBC_NAMESPACE::syscall_impl<int>(SYS_faccessat, AT_FDCWD, path, mode, 0);
+ LIBC_NAMESPACE::syscall_impl<int>(SYS_faccessat, AT_FDCWD, path, mode);
#else
#error "access and faccessat syscalls not available."
#endif
diff --git a/libc/src/unistd/linux/faccessat.cpp b/libc/src/unistd/linux/faccessat.cpp
new file mode 100644
index 0000000..7a2a29c
--- /dev/null
+++ b/libc/src/unistd/linux/faccessat.cpp
@@ -0,0 +1,37 @@
+//===-- Linux implementation of faccessat ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/unistd/faccessat.h"
+
+#include "src/__support/OSUtil/syscall.h" // For internal syscall function.
+#include "src/__support/common.h"
+
+#include "hdr/fcntl_macros.h"
+#include "src/__support/libc_errno.h"
+#include "src/__support/macros/config.h"
+#include <sys/syscall.h> // For syscall numbers.
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(int, faccessat,
+ (int fd, const char *path, int amode, int flag)) {
+#ifdef SYS_faccessat2
+ int ret =
+ LIBC_NAMESPACE::syscall_impl<int>(SYS_faccessat2, fd, path, amode, flag);
+#else
+#error "faccessat2 syscall is not available."
+#endif
+
+ if (ret < 0) {
+ libc_errno = -ret;
+ return -1;
+ }
+ return 0;
+}
+
+} // namespace LIBC_NAMESPACE_DECL
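Note the companion fix in access.cpp above: the plain SYS_faccessat syscall takes only three arguments (the kernel has no flags parameter there; flags require faccessat2), so the stray fourth argument is dropped, while this new entrypoint forwards its flag to SYS_faccessat2. A hedged usage sketch against the POSIX interface declared above:

    #include <fcntl.h>   // AT_FDCWD, AT_EACCESS
    #include <unistd.h>  // faccessat, R_OK
    #include <cstdio>

    int main() {
      // Check readability using the effective user/group IDs, the behavior a
      // set-uid program actually cares about.
      if (::faccessat(AT_FDCWD, "/etc/passwd", R_OK, AT_EACCESS) == 0)
        std::puts("readable");
      else
        std::perror("faccessat");
    }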
diff --git a/libc/test/shared/CMakeLists.txt b/libc/test/shared/CMakeLists.txt
index 13a0aae..ea4634c 100644
--- a/libc/test/shared/CMakeLists.txt
+++ b/libc/test/shared/CMakeLists.txt
@@ -37,6 +37,7 @@ add_fp_unittest(
libc.src.__support.math.cospif16
libc.src.__support.math.dsqrtl
libc.src.__support.math.exp10m1f
+ libc.src.__support.math.exp10m1f16
libc.src.__support.math.erff
libc.src.__support.math.exp
libc.src.__support.math.exp10
diff --git a/libc/test/shared/shared_math_test.cpp b/libc/test/shared/shared_math_test.cpp
index 25bf5ad..1722193 100644
--- a/libc/test/shared/shared_math_test.cpp
+++ b/libc/test/shared/shared_math_test.cpp
@@ -27,6 +27,7 @@ TEST(LlvmLibcSharedMathTest, AllFloat16) {
EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::coshf16(0.0f16));
EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::cospif16(0.0f16));
EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::exp10f16(0.0f16));
+ EXPECT_FP_EQ(0x0p+0f16, LIBC_NAMESPACE::shared::exp10m1f16(0.0f16));
EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::expf16(0.0f16));
diff --git a/libc/test/src/unistd/CMakeLists.txt b/libc/test/src/unistd/CMakeLists.txt
index 6630a7e..44f28ff 100644
--- a/libc/test/src/unistd/CMakeLists.txt
+++ b/libc/test/src/unistd/CMakeLists.txt
@@ -94,6 +94,23 @@ add_libc_unittest(
)
add_libc_unittest(
+ faccessat_test
+ SUITE
+ libc_unistd_unittests
+ SRCS
+ faccessat_test.cpp
+ DEPENDS
+ libc.include.unistd
+ libc.src.errno.errno
+ libc.src.fcntl.open
+ libc.src.unistd.faccessat
+ libc.src.unistd.close
+ libc.src.unistd.unlink
+ libc.test.UnitTest.ErrnoCheckingTest
+ libc.test.UnitTest.ErrnoSetterMatcher
+)
+
+add_libc_unittest(
fchdir_test
SUITE
libc_unistd_unittests
diff --git a/libc/test/src/unistd/faccessat_test.cpp b/libc/test/src/unistd/faccessat_test.cpp
new file mode 100644
index 0000000..6280b14
--- /dev/null
+++ b/libc/test/src/unistd/faccessat_test.cpp
@@ -0,0 +1,115 @@
+//===-- Unittests for faccessat -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/fcntl/open.h"
+#include "src/unistd/close.h"
+#include "src/unistd/faccessat.h"
+#include "src/unistd/unlink.h"
+#include "test/UnitTest/ErrnoCheckingTest.h"
+#include "test/UnitTest/ErrnoSetterMatcher.h"
+#include "test/UnitTest/Test.h"
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+namespace {
+
+using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails;
+using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds;
+
+using LlvmLibcFaccessatTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest;
+
+TEST_F(LlvmLibcFaccessatTest, WithAtFdcwd) {
+ // Test access checks on a file with AT_FDCWD and no flags, equivalent to
+ // access().
+ constexpr const char *FILENAME = "faccessat_basic.test";
+ auto TEST_FILE = libc_make_test_file_path(FILENAME);
+
+ // Check permissions on a file with full permissions
+ int fd = LIBC_NAMESPACE::open(TEST_FILE, O_WRONLY | O_CREAT, S_IRWXU);
+ ASSERT_ERRNO_SUCCESS();
+ ASSERT_GT(fd, 0);
+ ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0));
+
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, F_OK, 0),
+ Succeeds(0));
+ ASSERT_THAT(
+ LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, X_OK | W_OK | R_OK, 0),
+ Succeeds(0));
+ ASSERT_THAT(LIBC_NAMESPACE::unlink(TEST_FILE), Succeeds(0));
+
+ // Check permissions on a file with execute-only permission
+ fd = LIBC_NAMESPACE::open(TEST_FILE, O_WRONLY | O_CREAT, S_IXUSR);
+ ASSERT_ERRNO_SUCCESS();
+ ASSERT_GT(fd, 0);
+ ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0));
+
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, F_OK, 0),
+ Succeeds(0));
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, X_OK, 0),
+ Succeeds(0));
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, R_OK, 0),
+ Fails(EACCES));
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, W_OK, 0),
+ Fails(EACCES));
+ ASSERT_THAT(LIBC_NAMESPACE::unlink(TEST_FILE), Succeeds(0));
+}
+
+TEST_F(LlvmLibcFaccessatTest, NonExistentFile) {
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, "faccessat_nonexistent.test",
+ F_OK, 0),
+ Fails(ENOENT));
+}
+
+TEST_F(LlvmLibcFaccessatTest, AtEaccess) {
+ // With AT_EACCESS, faccessat checks permissions using the effective user ID,
+ // but the effective and real user ID will be the same here and changing that
+ // is not feasible in a test, so this is just a basic sanity check.
+ constexpr const char *FILENAME = "faccessat_eaccess.test";
+ auto TEST_FILE = libc_make_test_file_path(FILENAME);
+
+ int fd = LIBC_NAMESPACE::open(TEST_FILE, O_WRONLY | O_CREAT, S_IRWXU);
+ ASSERT_ERRNO_SUCCESS();
+ ASSERT_GT(fd, 0);
+ ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0));
+
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, X_OK | W_OK | R_OK,
+ AT_EACCESS),
+ Succeeds(0));
+
+ ASSERT_THAT(LIBC_NAMESPACE::unlink(TEST_FILE), Succeeds(0));
+}
+
+TEST_F(LlvmLibcFaccessatTest, AtEmptyPath) {
+ constexpr const char *FILENAME = "faccessat_atemptypath.test";
+ auto TEST_FILE = libc_make_test_file_path(FILENAME);
+
+ int fd = LIBC_NAMESPACE::open(TEST_FILE, O_WRONLY | O_CREAT, S_IRWXU);
+ ASSERT_ERRNO_SUCCESS();
+ ASSERT_GT(fd, 0);
+
+ // Check permissions on the file referred to by fd
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(fd, "", F_OK, AT_EMPTY_PATH),
+ Succeeds(0));
+ ASSERT_THAT(
+ LIBC_NAMESPACE::faccessat(fd, "", X_OK | W_OK | R_OK, AT_EMPTY_PATH),
+ Succeeds(0));
+
+ ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0));
+ ASSERT_THAT(LIBC_NAMESPACE::unlink(TEST_FILE), Succeeds(0));
+
+ // Check permissions on the current working directory
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, "", F_OK, AT_EMPTY_PATH),
+ Succeeds(0));
+ ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, "", X_OK | W_OK | R_OK,
+ AT_EMPTY_PATH),
+ Succeeds(0));
+}
+
+} // namespace
diff --git a/libcxx/docs/index.rst b/libcxx/docs/index.rst
index a44c316..4d5064b 100644
--- a/libcxx/docs/index.rst
+++ b/libcxx/docs/index.rst
@@ -133,7 +133,7 @@ velocity, libc++ drops support for older compilers as newer ones are released.
Compiler Versions Restrictions Support policy
============ =================== ========================== =====================
Clang 19, 20, 21-git latest two stable releases per `LLVM's release page <https://releases.llvm.org>`_ and the development version
-AppleClang 16.4 latest stable release per `Xcode's release page <https://developer.apple.com/documentation/xcode-release-notes>`_
+AppleClang 26.0 latest stable release per `Xcode's release page <https://developer.apple.com/documentation/xcode-release-notes>`_
Open XL 17.1.3 (AIX) latest stable release per `Open XL's documentation page <https://www.ibm.com/docs/en/openxl-c-and-cpp-aix>`_
GCC 15 In C++11 or later only latest stable release per `GCC's release page <https://gcc.gnu.org/releases.html>`_
============ =================== ========================== =====================
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index e050362..ddace8b 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -839,6 +839,7 @@ set(files
__type_traits/is_floating_point.h
__type_traits/is_function.h
__type_traits/is_fundamental.h
+ __type_traits/is_generic_transparent_comparator.h
__type_traits/is_implicit_lifetime.h
__type_traits/is_implicitly_default_constructible.h
__type_traits/is_integral.h
@@ -881,6 +882,7 @@ set(files
__type_traits/make_32_64_or_128_bit.h
__type_traits/make_const_lvalue_ref.h
__type_traits/make_signed.h
+ __type_traits/make_transparent.h
__type_traits/make_unsigned.h
__type_traits/maybe_const.h
__type_traits/nat.h
diff --git a/libcxx/include/__algorithm/comp.h b/libcxx/include/__algorithm/comp.h
index ab3c598..38e2fb9 100644
--- a/libcxx/include/__algorithm/comp.h
+++ b/libcxx/include/__algorithm/comp.h
@@ -11,6 +11,7 @@
#include <__config>
#include <__type_traits/desugars_to.h>
+#include <__type_traits/is_generic_transparent_comparator.h>
#include <__type_traits/is_integral.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -48,6 +49,9 @@ inline const bool __desugars_to_v<__less_tag, __less<>, _Tp, _Tp> = true;
template <class _Tp>
inline const bool __desugars_to_v<__totally_ordered_less_tag, __less<>, _Tp, _Tp> = is_integral<_Tp>::value;
+template <>
+inline const bool __is_generic_transparent_comparator_v<__less<> > = true;
+
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___ALGORITHM_COMP_H
diff --git a/libcxx/include/__algorithm/find.h b/libcxx/include/__algorithm/find.h
index 5f32ae8..72e201a 100644
--- a/libcxx/include/__algorithm/find.h
+++ b/libcxx/include/__algorithm/find.h
@@ -69,7 +69,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* __find_vectorized(_Tp* __first, _Tp* __last,
auto __orig_first = __first;
- auto __values = static_cast<__simd_vector<_Up, __vec_size>>(__value); // broadcast the value
+ auto __values = static_cast<__simd_vector<_Tp, __vec_size>>(__value); // broadcast the value
while (static_cast<size_t>(__last - __first) >= __unroll_count * __vec_size) [[__unlikely__]] {
__vec __lhs[__unroll_count];
diff --git a/libcxx/include/__functional/is_transparent.h b/libcxx/include/__functional/is_transparent.h
index 567df1a..c2c6fbc 100644
--- a/libcxx/include/__functional/is_transparent.h
+++ b/libcxx/include/__functional/is_transparent.h
@@ -29,6 +29,14 @@ inline const bool __is_transparent_v<_Tp, _Key, __void_t<typename _Tp::is_transp
#endif
+// Two types are considered transparently comparable if `comparator(key, arg)` is equivalent to `comparator(key,
+// <implicit cast to KeyT>(arg))`.
+//
+// This is different from `__is_transparent_v`, which is only a property of the comparator and doesn't provide
+// additional semantic guarantees.
+template <class _Comparator, class _KeyT, class _Arg, class = void>
+inline const bool __is_transparently_comparable_v = false;
+
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___FUNCTIONAL_IS_TRANSPARENT
diff --git a/libcxx/include/__functional/operations.h b/libcxx/include/__functional/operations.h
index 7b0ea11..7f315ca 100644
--- a/libcxx/include/__functional/operations.h
+++ b/libcxx/include/__functional/operations.h
@@ -15,7 +15,9 @@
#include <__functional/unary_function.h>
#include <__fwd/functional.h>
#include <__type_traits/desugars_to.h>
+#include <__type_traits/is_generic_transparent_comparator.h>
#include <__type_traits/is_integral.h>
+#include <__type_traits/make_transparent.h>
#include <__utility/forward.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -377,6 +379,14 @@ struct less<void> {
typedef void is_transparent;
};
+template <class _Tp>
+struct __make_transparent<less<_Tp> > {
+ using type _LIBCPP_NODEBUG = less<>;
+};
+
+template <>
+inline const bool __is_generic_transparent_comparator_v<less<>> = true;
+
template <class _Tp, class _Up>
inline const bool __desugars_to_v<__less_tag, less<>, _Tp, _Up> = true;
@@ -466,6 +476,14 @@ struct greater<void> {
template <class _Tp, class _Up>
inline const bool __desugars_to_v<__greater_tag, greater<>, _Tp, _Up> = true;
+
+template <class _Tp>
+struct __make_transparent<greater<_Tp>> {
+ using type _LIBCPP_NODEBUG = greater<>;
+};
+
+template <>
+inline const bool __is_generic_transparent_comparator_v<greater<>> = true;
#endif
// Logical operations
diff --git a/libcxx/include/__functional/ranges_operations.h b/libcxx/include/__functional/ranges_operations.h
index df95843..dc9da06 100644
--- a/libcxx/include/__functional/ranges_operations.h
+++ b/libcxx/include/__functional/ranges_operations.h
@@ -14,6 +14,7 @@
#include <__concepts/totally_ordered.h>
#include <__config>
#include <__type_traits/desugars_to.h>
+#include <__type_traits/is_generic_transparent_comparator.h>
#include <__utility/forward.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -108,6 +109,12 @@ inline const bool __desugars_to_v<__less_tag, ranges::less, _Tp, _Up> = true;
template <class _Tp, class _Up>
inline const bool __desugars_to_v<__greater_tag, ranges::greater, _Tp, _Up> = true;
+template <>
+inline const bool __is_generic_transparent_comparator_v<ranges::less> = true;
+
+template <>
+inline const bool __is_generic_transparent_comparator_v<ranges::greater> = true;
+
#endif // _LIBCPP_STD_VER >= 20
_LIBCPP_END_NAMESPACE_STD
diff --git a/libcxx/include/__tree b/libcxx/include/__tree
index 61c910c..ef960d4 100644
--- a/libcxx/include/__tree
+++ b/libcxx/include/__tree
@@ -34,6 +34,7 @@
#include <__type_traits/is_same.h>
#include <__type_traits/is_specialization.h>
#include <__type_traits/is_swappable.h>
+#include <__type_traits/make_transparent.h>
#include <__type_traits/remove_const.h>
#include <__utility/forward.h>
#include <__utility/lazy_synth_three_way_comparator.h>
@@ -1749,7 +1750,8 @@ __tree<_Tp, _Compare, _Allocator>::__find_equal(const _Key& __v) {
}
__node_base_pointer* __node_ptr = __root_ptr();
- auto __comp = __lazy_synth_three_way_comparator<_Compare, _Key, value_type>(value_comp());
+ auto&& __transparent = std::__as_transparent(value_comp());
+ auto __comp = __lazy_synth_three_way_comparator<__make_transparent_t<_Compare>, _Key, value_type>(__transparent);
while (true) {
auto __comp_res = __comp(__v, __nd->__get_value());
diff --git a/libcxx/include/__type_traits/is_generic_transparent_comparator.h b/libcxx/include/__type_traits/is_generic_transparent_comparator.h
new file mode 100644
index 0000000..fd02c0b
--- /dev/null
+++ b/libcxx/include/__type_traits/is_generic_transparent_comparator.h
@@ -0,0 +1,30 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_IS_GENERIC_TRANSPARENT_COMPARATOR_H
+#define _LIBCPP___TYPE_TRAITS_IS_GENERIC_TRANSPARENT_COMPARATOR_H
+
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+// This trait returns true if the given _Comparator is known to accept any two types for comparison. This is separate
+// from `__is_transparent_v`, since that only enables overloads of specific functions, but doesn't give any semantic
+// guarantees. This trait guarantees that the comparator simply calls the appropriate comparison functions for any two
+// types.
+
+template <class _Comparator>
+inline const bool __is_generic_transparent_comparator_v = false;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_IS_GENERIC_TRANSPARENT_COMPARATOR_H
diff --git a/libcxx/include/__type_traits/make_transparent.h b/libcxx/include/__type_traits/make_transparent.h
new file mode 100644
index 0000000..4d3207a
--- /dev/null
+++ b/libcxx/include/__type_traits/make_transparent.h
@@ -0,0 +1,48 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_MAKE_TRANSPARENT_H
+#define _LIBCPP___TYPE_TRAITS_MAKE_TRANSPARENT_H
+
+#include <__config>
+#include <__type_traits/enable_if.h>
+#include <__type_traits/is_empty.h>
+#include <__type_traits/is_same.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+// __make_transparent tries to create a transparent comparator from its non-transparent counterpart, e.g. obtain
+// `less<>` from `less<T>`. This is useful in cases where conversions can be avoided (e.g. a string literal to a
+// std::string).
+
+template <class _Comparator>
+struct __make_transparent {
+ using type _LIBCPP_NODEBUG = _Comparator;
+};
+
+template <class _Comparator>
+using __make_transparent_t _LIBCPP_NODEBUG = typename __make_transparent<_Comparator>::type;
+
+template <class _Comparator, __enable_if_t<is_same<_Comparator, __make_transparent_t<_Comparator> >::value, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _Comparator& __as_transparent(_Comparator& __comp) {
+ return __comp;
+}
+
+template <class _Comparator, __enable_if_t<!is_same<_Comparator, __make_transparent_t<_Comparator> >::value, int> = 0>
+_LIBCPP_HIDE_FROM_ABI __make_transparent_t<_Comparator> __as_transparent(_Comparator&) {
+ static_assert(is_empty<_Comparator>::value);
+ return __make_transparent_t<_Comparator>();
+}
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_MAKE_TRANSPARENT_H
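A minimal standalone sketch of the idea (names illustrative, not the libc++ internals): a stateless keyed comparator maps to its transparent counterpart, so lookups can compare a probe value against stored keys directly.

    #include <functional>
    #include <string>
    #include <type_traits>

    template <class Comp>
    struct make_transparent { using type = Comp; };

    // less<T> carries no state, so less<> is an equivalent drop-in.
    template <class T>
    struct make_transparent<std::less<T>> { using type = std::less<>; };

    template <class Comp>
    typename make_transparent<Comp>::type as_transparent(Comp&) {
      // Mirrors the static_assert above: only stateless comparators qualify.
      static_assert(std::is_empty<Comp>::value, "comparator must be stateless");
      return typename make_transparent<Comp>::type();
    }

    int main() {
      std::less<std::string> keyed;
      auto cmp = as_transparent(keyed);
      // The const char* participates in the comparison directly; the
      // comparator itself forces no conversion to std::string.
      return cmp("a", std::string("b")) ? 0 : 1;
    }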
diff --git a/libcxx/include/map b/libcxx/include/map
index a63dfec..035f913 100644
--- a/libcxx/include/map
+++ b/libcxx/include/map
@@ -600,7 +600,10 @@ erase_if(multimap<Key, T, Compare, Allocator>& c, Predicate pred); // C++20
# include <__ranges/from_range.h>
# include <__tree>
# include <__type_traits/container_traits.h>
+# include <__type_traits/desugars_to.h>
# include <__type_traits/is_allocator.h>
+# include <__type_traits/is_convertible.h>
+# include <__type_traits/make_transparent.h>
# include <__type_traits/remove_const.h>
# include <__type_traits/type_identity.h>
# include <__utility/forward.h>
@@ -666,6 +669,11 @@ public:
# endif
};
+template <class _Key, class _MapValueT, class _Compare>
+struct __make_transparent<__map_value_compare<_Key, _MapValueT, _Compare> > {
+ using type _LIBCPP_NODEBUG = __map_value_compare<_Key, _MapValueT, __make_transparent_t<_Compare> >;
+};
+
# if _LIBCPP_STD_VER >= 14
template <class _MapValueT, class _Key, class _Compare>
struct __lazy_synth_three_way_comparator<__map_value_compare<_Key, _MapValueT, _Compare>, _MapValueT, _MapValueT> {
@@ -1048,6 +1056,24 @@ public:
_LIBCPP_HIDE_FROM_ABI mapped_type& operator[](key_type&& __k);
# endif
+ template <class _Arg,
+ __enable_if_t<__is_transparently_comparable_v<_Compare, key_type, __remove_cvref_t<_Arg> >, int> = 0>
+ _LIBCPP_HIDE_FROM_ABI mapped_type& at(_Arg&& __arg) {
+ auto [_, __child] = __tree_.__find_equal(__arg);
+ if (__child == nullptr)
+ std::__throw_out_of_range("map::at: key not found");
+ return static_cast<__node_pointer>(__child)->__get_value().second;
+ }
+
+ template <class _Arg,
+ __enable_if_t<__is_transparently_comparable_v<_Compare, key_type, __remove_cvref_t<_Arg> >, int> = 0>
+ _LIBCPP_HIDE_FROM_ABI const mapped_type& at(_Arg&& __arg) const {
+ auto [_, __child] = __tree_.__find_equal(__arg);
+ if (__child == nullptr)
+ std::__throw_out_of_range("map::at: key not found");
+ return static_cast<__node_pointer>(__child)->__get_value().second;
+ }
+
_LIBCPP_HIDE_FROM_ABI mapped_type& at(const key_type& __k);
_LIBCPP_HIDE_FROM_ABI const mapped_type& at(const key_type& __k) const;
@@ -1242,11 +1268,15 @@ public:
_LIBCPP_HIDE_FROM_ABI iterator find(const key_type& __k) { return __tree_.find(__k); }
_LIBCPP_HIDE_FROM_ABI const_iterator find(const key_type& __k) const { return __tree_.find(__k); }
# if _LIBCPP_STD_VER >= 14
- template <typename _K2, enable_if_t<__is_transparent_v<_Compare, _K2>, int> = 0>
+ template <typename _K2,
+ enable_if_t<__is_transparent_v<_Compare, _K2> || __is_transparently_comparable_v<_Compare, key_type, _K2>,
+ int> = 0>
_LIBCPP_HIDE_FROM_ABI iterator find(const _K2& __k) {
return __tree_.find(__k);
}
- template <typename _K2, enable_if_t<__is_transparent_v<_Compare, _K2>, int> = 0>
+ template <typename _K2,
+ enable_if_t<__is_transparent_v<_Compare, _K2> || __is_transparently_comparable_v<_Compare, key_type, _K2>,
+ int> = 0>
_LIBCPP_HIDE_FROM_ABI const_iterator find(const _K2& __k) const {
return __tree_.find(__k);
}
@@ -1262,7 +1292,9 @@ public:
# if _LIBCPP_STD_VER >= 20
_LIBCPP_HIDE_FROM_ABI bool contains(const key_type& __k) const { return find(__k) != end(); }
- template <typename _K2, enable_if_t<__is_transparent_v<_Compare, _K2>, int> = 0>
+ template <typename _K2,
+ enable_if_t<__is_transparent_v<_Compare, _K2> || __is_transparently_comparable_v<_Compare, key_type, _K2>,
+ int> = 0>
_LIBCPP_HIDE_FROM_ABI bool contains(const _K2& __k) const {
return find(__k) != end();
}
@@ -1271,12 +1303,16 @@ public:
_LIBCPP_HIDE_FROM_ABI iterator lower_bound(const key_type& __k) { return __tree_.lower_bound(__k); }
_LIBCPP_HIDE_FROM_ABI const_iterator lower_bound(const key_type& __k) const { return __tree_.lower_bound(__k); }
# if _LIBCPP_STD_VER >= 14
- template <typename _K2, enable_if_t<__is_transparent_v<_Compare, _K2>, int> = 0>
+ template <typename _K2,
+ enable_if_t<__is_transparent_v<_Compare, _K2> || __is_transparently_comparable_v<_Compare, key_type, _K2>,
+ int> = 0>
_LIBCPP_HIDE_FROM_ABI iterator lower_bound(const _K2& __k) {
return __tree_.lower_bound(__k);
}
- template <typename _K2, enable_if_t<__is_transparent_v<_Compare, _K2>, int> = 0>
+ template <typename _K2,
+ enable_if_t<__is_transparent_v<_Compare, _K2> || __is_transparently_comparable_v<_Compare, key_type, _K2>,
+ int> = 0>
_LIBCPP_HIDE_FROM_ABI const_iterator lower_bound(const _K2& __k) const {
return __tree_.lower_bound(__k);
}
@@ -1285,11 +1321,15 @@ public:
_LIBCPP_HIDE_FROM_ABI iterator upper_bound(const key_type& __k) { return __tree_.upper_bound(__k); }
_LIBCPP_HIDE_FROM_ABI const_iterator upper_bound(const key_type& __k) const { return __tree_.upper_bound(__k); }
# if _LIBCPP_STD_VER >= 14
- template <typename _K2, enable_if_t<__is_transparent_v<_Compare, _K2>, int> = 0>
+ template <typename _K2,
+ enable_if_t<__is_transparent_v<_Compare, _K2> || __is_transparently_comparable_v<_Compare, key_type, _K2>,
+ int> = 0>
_LIBCPP_HIDE_FROM_ABI iterator upper_bound(const _K2& __k) {
return __tree_.upper_bound(__k);
}
- template <typename _K2, enable_if_t<__is_transparent_v<_Compare, _K2>, int> = 0>
+ template <typename _K2,
+ enable_if_t<__is_transparent_v<_Compare, _K2> || __is_transparently_comparable_v<_Compare, key_type, _K2>,
+ int> = 0>
_LIBCPP_HIDE_FROM_ABI const_iterator upper_bound(const _K2& __k) const {
return __tree_.upper_bound(__k);
}
diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in
index 93d43f8..894093b 100644
--- a/libcxx/include/module.modulemap.in
+++ b/libcxx/include/module.modulemap.in
@@ -200,6 +200,7 @@ module std_core [system] {
header "__type_traits/is_fundamental.h"
export std_core.type_traits.integral_constant
}
+ module is_generic_transparent_comparator { header "__type_traits/is_generic_transparent_comparator.h" }
module is_implicit_lifetime {
header "__type_traits/is_implicit_lifetime.h"
export std_core.type_traits.integral_constant
@@ -353,6 +354,7 @@ module std_core [system] {
module make_32_64_or_128_bit { header "__type_traits/make_32_64_or_128_bit.h" }
module make_const_lvalue_ref { header "__type_traits/make_const_lvalue_ref.h" }
module make_signed { header "__type_traits/make_signed.h" }
+ module make_transparent { header "__type_traits/make_transparent.h" }
module make_unsigned { header "__type_traits/make_unsigned.h" }
module maybe_const { header "__type_traits/maybe_const.h" }
module nat { header "__type_traits/nat.h" }
diff --git a/libcxx/include/string b/libcxx/include/string
index cfd6861..dc562e0 100644
--- a/libcxx/include/string
+++ b/libcxx/include/string
@@ -600,6 +600,7 @@ basic_string<char32_t> operator""s( const char32_t *str, size_t len );
# include <__debug_utils/sanitizers.h>
# include <__format/enable_insertable.h>
# include <__functional/hash.h>
+# include <__functional/is_transparent.h>
# include <__functional/unary_function.h>
# include <__fwd/string.h>
# include <__iterator/bounded_iter.h>
@@ -628,6 +629,7 @@ basic_string<char32_t> operator""s( const char32_t *str, size_t len );
# include <__type_traits/is_allocator.h>
# include <__type_traits/is_array.h>
# include <__type_traits/is_convertible.h>
+# include <__type_traits/is_generic_transparent_comparator.h>
# include <__type_traits/is_nothrow_assignable.h>
# include <__type_traits/is_nothrow_constructible.h>
# include <__type_traits/is_replaceable.h>
@@ -2567,6 +2569,20 @@ struct __default_three_way_comparator<basic_string<_CharT, _Traits, _Alloc>, bas
};
# endif
+template <class _Comparator, class _CharT, class _Traits, class _Alloc>
+inline const bool __is_transparently_comparable_v<_Comparator,
+ basic_string<_CharT, _Traits, _Alloc>,
+ const _CharT*,
+ __enable_if_t<__is_generic_transparent_comparator_v<_Comparator> > > =
+ true;
+
+template <class _Comparator, class _CharT, class _Traits, class _Alloc, size_t _Np>
+inline const bool __is_transparently_comparable_v<_Comparator,
+ basic_string<_CharT, _Traits, _Alloc>,
+ _CharT[_Np],
+ __enable_if_t<__is_generic_transparent_comparator_v<_Comparator> > > =
+ true;
+
# if _LIBCPP_STD_VER >= 17
template <class _InputIterator,
class _CharT = __iter_value_type<_InputIterator>,
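
These two declarations follow the usual variable-template opt-in pattern: a primary template (declared with the trait, not visible in this hunk) defaults to false, and partial specializations flip it to true under an enable-if condition. A minimal self-contained sketch of that pattern, with hypothetical names (this is not the libc++ implementation):

    #include <type_traits>

    // Primary template: by default, Key cannot be transparently compared
    // with Arg under the given Comparator.
    template <class Comparator, class Key, class Arg, class = void>
    inline constexpr bool is_transparently_comparable_v = false;

    // Hypothetical comparator that advertises itself as "generic": it compares
    // any two types via operator<, so no temporary Key has to be built.
    struct generic_less {
      using is_generic_transparent = void; // marker invented for this sketch
      template <class T, class U>
      bool operator()(const T& t, const U& u) const { return t < u; }
    };

    // Opt-in specialization, mirroring the shape of the declarations above:
    // only comparators carrying the marker unlock const char* lookups.
    template <class Comparator, class Key>
    inline constexpr bool is_transparently_comparable_v<
        Comparator, Key, const char*,
        std::void_t<typename Comparator::is_generic_transparent>> = true;

    static_assert(!is_transparently_comparable_v<std::less<int>, int, const char*>);
    static_assert(is_transparently_comparable_v<generic_less, int, const char*>);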
diff --git a/libcxx/test/benchmarks/containers/associative/map.bench.cpp b/libcxx/test/benchmarks/containers/associative/map.bench.cpp
index bd664db..142229a 100644
--- a/libcxx/test/benchmarks/containers/associative/map.bench.cpp
+++ b/libcxx/test/benchmarks/containers/associative/map.bench.cpp
@@ -16,6 +16,19 @@
#include "../../GenerateInput.h"
#include "benchmark/benchmark.h"
+static void BM_map_find_string_literal(benchmark::State& state) {
+ std::map<std::string, int> map;
+ map.emplace("Something very very long to show a long string situation", 1);
+ map.emplace("Something Else", 2);
+
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(map);
+ benchmark::DoNotOptimize(map.find("Something very very long to show a long string situation"));
+ }
+}
+
+BENCHMARK(BM_map_find_string_literal);
+
template <class K, class V>
struct support::adapt_operations<std::map<K, V>> {
using ValueType = typename std::map<K, V>::value_type;
diff --git a/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp b/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp
index 57adec2..d670c53 100644
--- a/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp
+++ b/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp
@@ -15,6 +15,19 @@
#include "../../GenerateInput.h"
#include "benchmark/benchmark.h"
+static void BM_unordered_map_find_string_literal(benchmark::State& state) {
+ std::unordered_map<std::string, int> map;
+ map.emplace("Something very very long to show a long string situation", 1);
+ map.emplace("Something Else", 2);
+
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(map);
+ benchmark::DoNotOptimize(map.find("Something very very long to show a long string situation"));
+ }
+}
+
+BENCHMARK(BM_unordered_map_find_string_literal);
+
template <class K, class V>
struct support::adapt_operations<std::unordered_map<K, V>> {
using ValueType = typename std::unordered_map<K, V>::value_type;
diff --git a/libcxx/test/libcxx/algorithms/cpp17_iterator_concepts.verify.cpp b/libcxx/test/libcxx/algorithms/cpp17_iterator_concepts.verify.cpp
index 629a887..70341ee 100644
--- a/libcxx/test/libcxx/algorithms/cpp17_iterator_concepts.verify.cpp
+++ b/libcxx/test/libcxx/algorithms/cpp17_iterator_concepts.verify.cpp
@@ -143,7 +143,7 @@ void check_forward_iterator_requirements() {
// expected-note@*:* {{because 'not_default_constructible' does not satisfy '__cpp17_default_constructible'}}
_LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(postincrement_not_ref, ""); // expected-error {{static assertion failed}}
#ifndef _AIX
- // expected-note-re@*:* {{because type constraint 'convertible_to<{{(valid_iterator<postincrement_not_ref>::)?}}Proxy, const postincrement_not_ref &>' was not satisfied}}
+ // expected-note-re@*:* {{'convertible_to<{{(valid_iterator<postincrement_not_ref>::)?}}Proxy, const postincrement_not_ref &>'}}
#endif
}
@@ -173,7 +173,7 @@ void check_bidirectional_iterator_requirements() {
_LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(missing_postdecrement, ""); // expected-error {{static assertion failed}}
// expected-note@*:* {{cannot decrement value of type 'missing_postdecrement'}}
_LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(not_returning_iter_reference, ""); // expected-error {{static assertion failed}}
- // expected-note-re@*:* {{because type constraint 'same_as<int, __iter_reference<not_returning_iter_reference>{{ ?}}>' was not satisfied}}
+ // expected-note-re@*:* {{'same_as<int, __iter_reference<not_returning_iter_reference>{{ ?}}>'}}
// clang-format on
}
diff --git a/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp b/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp
index 056d6f6..2c3751a 100644
--- a/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp
+++ b/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp
@@ -12,6 +12,9 @@
// Assertion failed: N->getValueType(0) == MVT::v1i1 && "Expected v1i1 type"
// XFAIL: target=armv7-unknown-linux-gnueabihf
+// FIXME: This should work with -flax-vector-conversions=none
+// ADDITIONAL_COMPILE_FLAGS(clang): -flax-vector-conversions=integer
+
// <experimental/simd>
//
// [simd.class]
diff --git a/libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm b/libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm
index 05a6698..de38305 100644
--- a/libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm
+++ b/libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm
@@ -17,7 +17,8 @@
// out-of-the-box.
// REQUIRES: has-fobjc-arc && darwin
-// ADDITIONAL_COMPILE_FLAGS: -fobjc-arc
+// FIXME: including <Foundation/Foundation.h> seems to be currently broken with modules enabled
+// ADDITIONAL_COMPILE_FLAGS: -fobjc-arc -fno-modules
#include <cassert>
#include <exception>
diff --git a/libcxx/utils/find-rerun-candidates b/libcxx/utils/find-rerun-candidates
new file mode 100755
index 0000000..5ac2644
--- /dev/null
+++ b/libcxx/utils/find-rerun-candidates
@@ -0,0 +1,242 @@
+#!/usr/bin/env python3
+
+import argparse
+import datetime
+import functools
+import os
+import pathlib
+import re
+import statistics
+import subprocess
+import sys
+
+import git
+import pandas
+import tqdm
+
+@functools.total_ordering
+class Commit:
+ """
+ This class represents a commit inside a given Git repository.
+ """
+
+ def __init__(self, git_repo, sha):
+ self._git_repo = git_repo
+ self._sha = sha
+
+ def __eq__(self, other):
+ """
+ Return whether two commits refer to the same commit.
+
+ This doesn't take into account the content of the Git tree at those commits, only the
+ 'identity' of the commits themselves.
+ """
+ return self.fullrev == other.fullrev
+
+ def __lt__(self, other):
+ """
+        Return whether this commit is an ancestor of the other commit in the Git repository.
+ """
+ # Is self._sha an ancestor of other._sha?
+ res = subprocess.run(['git', '-C', self._git_repo, 'merge-base', '--is-ancestor', self._sha, other._sha])
+ if res.returncode not in (0, 1):
+ raise RuntimeError(f'Error when trying to obtain the commit order for {self._sha} and {other._sha}')
+ return res.returncode == 0
+
+ def __hash__(self):
+ """
+        Return a hash of the full revision for this commit.
+ """
+ return hash(self.fullrev)
+
+ @functools.cache
+ def show(self, include_diff=False):
+ """
+        Return the commit information for this commit, equivalent to `git show`.
+ """
+ cmd = ['git', '-C', self._git_repo, 'show', self._sha]
+ if not include_diff:
+ cmd.append('--no-patch')
+ return subprocess.check_output(cmd, text=True)
+
+ @functools.cached_property
+ def shortrev(self):
+ """
+        Return the shortened version of this commit's SHA.
+ """
+ return subprocess.check_output(['git', '-C', self._git_repo, 'rev-parse', '--short', self._sha], text=True).strip()
+
+ @functools.cached_property
+ def fullrev(self):
+ """
+        Return the full SHA associated with this commit.
+ """
+ return subprocess.check_output(['git', '-C', self._git_repo, 'rev-parse', self._sha], text=True).strip()
+
+ @functools.cached_property
+ def commit_date(self):
+ """
+ Return the date of the commit as a `datetime.datetime` object.
+ """
+ repo = git.Repo(self._git_repo)
+ return datetime.datetime.fromtimestamp(repo.commit(self._sha).committed_date)
+
+ def prefetch(self):
+ """
+        Prefetch cached properties associated with this commit object.
+
+        This makes it possible to control when time is spent querying that information from Git,
+        e.g. to provide better progress reporting to the user.
+ """
+ self.commit_date
+ self.fullrev
+ self.shortrev
+ self.show()
+
+ def __str__(self):
+ return self._sha
+
+def directory_path(string):
+ if os.path.isdir(string):
+ return pathlib.Path(string)
+ else:
+ raise NotADirectoryError(string)
+
+def parse_lnt(lines, aggregate=statistics.median):
+ """
+    Parse lines in LNT format and return a list of dictionaries of the form:
+
+ [
+ {
+ 'benchmark': <benchmark1>,
+            <metric1>: float,
+            <metric2>: float,
+ 'data_points': int,
+ ...
+ },
+ {
+ 'benchmark': <benchmark2>,
+ <metric1>: [float],
+ <metric2>: [float],
+ 'data_points': int,
+ ...
+ },
+ ...
+ ]
+
+    If a metric has multiple values associated with it, they are aggregated into a single
+ value using the provided aggregation function.
+ """
+ results = {}
+ for line in lines:
+ line = line.strip()
+ if not line:
+ continue
+
+ (identifier, value) = line.split(' ')
+ (benchmark, metric) = identifier.split('.')
+ if benchmark not in results:
+ results[benchmark] = {'benchmark': benchmark}
+
+ entry = results[benchmark]
+ if metric not in entry:
+ entry[metric] = []
+ entry[metric].append(float(value))
+
+    for entry in results.values():
+ metrics = [key for key in entry if isinstance(entry[key], list)]
+ min_data_points = min(len(entry[metric]) for metric in metrics)
+ for metric in metrics:
+ entry[metric] = aggregate(entry[metric])
+ entry['data_points'] = min_data_points
+
+ return list(results.values())
+
+def sorted_revlist(git_repo, commits):
+ """
+    Return the list of commits sorted in chronological order within the provided Git repository:
+    items earlier in the list are older than items later in the list.
+ """
+ revlist_cmd = ['git', '-C', git_repo, 'rev-list', '--no-walk'] + list(commits)
+ revlist = subprocess.check_output(revlist_cmd, text=True).strip().splitlines()
+ return list(reversed(revlist))
+
+def main(argv):
+ parser = argparse.ArgumentParser(
+ prog='find-rerun-candidates',
+ description='Find benchmarking data points that are good candidates for additional runs, to reduce noise.')
+ parser.add_argument('directory', type=directory_path,
+ help='Path to a valid directory containing benchmark data in LNT format, each file being named <commit>.lnt. '
+ 'This is also the format generated by the `benchmark-historical` utility.')
+ parser.add_argument('--metric', type=str, default='execution_time',
+ help='The metric to analyze. LNT data may contain multiple metrics (e.g. code size, execution time, etc) -- '
+ 'this option allows selecting which metric is analyzed for rerun candidates. The default is "execution_time".')
+ parser.add_argument('--filter', type=str, required=False,
+ help='An optional regular expression used to filter the benchmarks included in the analysis. '
+ 'Only benchmarks whose names match the regular expression will be analyzed.')
+ parser.add_argument('--outlier-threshold', metavar='FLOAT', type=float, default=0.1,
+                        help='Relative difference from the previous point for considering a data point as an outlier. This threshold is '
+ 'expressed as a floating point number, e.g. 0.25 will detect points that differ by more than 25%% from their '
+ 'previous result.')
+    parser.add_argument('--data-points-threshold', type=int, required=False,
+                        help='Number of data points above which a point is no longer reported as an outlier. If a point has more than '
+                             'that number of data points, it is not reported even when its relative difference is above the threshold. '
+                             'This can be used to re-run noisy data points until we have at least N samples, at which point we '
+                             'consider the data to be accurate, even if the result is beyond the threshold. By default, there is '
+                             'no limit on the number of data points.')
+ parser.add_argument('--git-repo', type=directory_path, default=pathlib.Path(os.getcwd()),
+ help='Path to the git repository to use for ordering commits in time. '
+ 'By default, the current working directory is used.')
+ args = parser.parse_args(argv)
+
+ # Extract benchmark data from the directory.
+ data = {}
+    files = list(args.directory.glob('*.lnt'))
+ for file in tqdm.tqdm(files, desc='Parsing LNT files'):
+ rows = parse_lnt(file.read_text().splitlines())
+ (commit, _) = os.path.splitext(os.path.basename(file))
+ commit = Commit(args.git_repo, commit)
+ data[commit] = rows
+
+ # Obtain commit information which is then cached throughout the program. Do this
+ # eagerly so we can provide a progress bar.
+ for commit in tqdm.tqdm(data.keys(), desc='Prefetching Git information'):
+ commit.prefetch()
+
+ # Create a dataframe from the raw data and add some columns to it:
+    # - 'commit' represents the Commit object associated with the results in that row
+    # - 'revlist_order' represents the order of the commit within the Git repository.
+ revlist = sorted_revlist(args.git_repo, [c.fullrev for c in data.keys()])
+ data = pandas.DataFrame([row | {'commit': c} for (c, rows) in data.items() for row in rows])
+ data = data.join(pandas.DataFrame([{'revlist_order': revlist.index(c.fullrev)} for c in data['commit']]))
+
+ # Filter the benchmarks if needed.
+ if args.filter is not None:
+ keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None]
+ data = data[data['benchmark'].isin(keeplist)]
+
+ # Detect outliers by selecting all benchmarks whose change percentage is beyond the threshold.
+ # If we have a max number of points, also take that into account.
+    if args.data_points_threshold is not None:
+        print(f'Reporting outliers with more than {args.outlier_threshold * 100}% relative difference and less than {args.data_points_threshold} data points')
+    else:
+        print(f'Reporting outliers with more than {args.outlier_threshold * 100}% relative difference')
+
+ overall = set()
+ for (benchmark, series) in data.sort_values(by='revlist_order').groupby('benchmark'):
+ pct_change = series[args.metric].pct_change()
+ outliers = series[pct_change.abs() > args.outlier_threshold]
+ if args.data_points_threshold is not None:
+ outliers = outliers[outliers['data_points'] < args.data_points_threshold]
+ outliers = set(outliers['commit'])
+ overall |= outliers
+ if len(outliers) > 0:
+ print(f'{benchmark}: {" ".join(c.shortrev for c in outliers)}')
+
+ if len(overall) > 0:
+ print(f'Summary: {" ".join(c.shortrev for c in overall)}')
+ else:
+        print('No outliers')
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
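
For reference, the outlier criterion the script applies to each benchmark series (after sorting by revlist_order) can be stated compactly. With x_i the aggregated metric at commit i, n_i its number of data points, \tau the --outlier-threshold, and N the optional --data-points-threshold, a commit is reported when

    \left| \frac{x_i - x_{i-1}}{x_{i-1}} \right| > \tau
    \quad \text{and, if } N \text{ is set,} \quad n_i < N

so a commit stops being reported once enough samples have accumulated, even if its step relative to the previous commit remains large.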
diff --git a/libcxx/utils/libcxx/test/params.py b/libcxx/utils/libcxx/test/params.py
index 6f013a7..c02d6df 100644
--- a/libcxx/utils/libcxx/test/params.py
+++ b/libcxx/utils/libcxx/test/params.py
@@ -75,6 +75,9 @@ _warningFlags = [
# We're not annotating all the APIs, since that's a lot of annotations compared to how many we actually care about
"-Wno-nullability-completeness",
+
+ # Technically not a warning flag, but might as well be:
+ "-flax-vector-conversions=none",
]
_allStandards = ["c++03", "c++11", "c++14", "c++17", "c++20", "c++23", "c++26"]
diff --git a/libcxx/utils/visualize-historical b/libcxx/utils/visualize-historical
index ef28e8b..114c7e8 100755
--- a/libcxx/utils/visualize-historical
+++ b/libcxx/utils/visualize-historical
@@ -213,13 +213,6 @@ def main(argv):
'Since the chart is interactive, it generally makes most sense to include all the benchmarks '
'and to then filter them in the browser, but in some cases producing a chart with a reduced '
'number of data series is useful.')
- parser.add_argument('--find-outliers', metavar='FLOAT', type=float, required=False,
- help='Instead of building a chart, detect commits that show a large spike (more than the given relative threshold) '
- 'with the previous result and print those to standard output. This can be used to generate a list of '
- 'potential outliers that we might want to re-generate the data for. The threshold is expressed as a '
- 'floating point number, e.g. 0.25 will detect points that differ by more than 25%% from their previous '
- 'result. This option respects --filter, i.e. only benchmarks that match the filter will be analyzed for '
- 'outliers.')
parser.add_argument('--subtitle', type=str, required=False,
help='Optional subtitle for the chart. This can be used to help identify the contents of the chart.')
parser.add_argument('--git-repo', type=directory_path, default=pathlib.Path(os.getcwd()),
@@ -258,16 +251,6 @@ def main(argv):
keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None]
data = data[data['benchmark'].isin(keeplist)]
- # If requested, perform a basic pass to detect outliers.
- # Note that we consider a commit to be an outlier if any of the benchmarks for that commit is an outlier.
- if args.find_outliers is not None:
- threshold = args.find_outliers
- outliers = set()
- for (benchmark, series) in data.sort_values(by='revlist_order').groupby('benchmark'):
- outliers |= set(series[series[args.metric].pct_change() > threshold]['commit'])
- print(f'Outliers (more than {threshold * 100}%): {" ".join(c.shortrev for c in outliers)}')
- return
-
# Plot the data for all the required benchmarks.
figure = create_plot(data, args.metric, subtitle=args.subtitle)
do_open = args.output is None or args.open
diff --git a/libunwind/test/configs/cmake-bridge.cfg.in b/libunwind/test/configs/cmake-bridge.cfg.in
index b804c21..e40497b 100644
--- a/libunwind/test/configs/cmake-bridge.cfg.in
+++ b/libunwind/test/configs/cmake-bridge.cfg.in
@@ -14,6 +14,7 @@
import os, site
site.addsitedir(os.path.join('@LIBUNWIND_LIBCXX_PATH@', 'utils'))
import libcxx.test.format
+from lit.util import which
# Basic configuration of the test suite
config.name = os.path.basename('@LIBUNWIND_TEST_CONFIG@')
@@ -33,3 +34,13 @@ config.substitutions.append(('%{install-prefix}', '@LIBUNWIND_TESTING_INSTALL_PR
config.substitutions.append(('%{include}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@/include'))
config.substitutions.append(('%{lib}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@/@LIBUNWIND_INSTALL_LIBRARY_DIR@'))
config.substitutions.append(('%{benchmark_flags}', ''))
+
+# Check for objcopy tools
+objcopy_path = which('llvm-objcopy', '@LLVM_BUILD_BINARY_DIR@/bin')
+if not objcopy_path:
+ objcopy_path = which('llvm-objcopy')
+if not objcopy_path:
+ objcopy_path = which('objcopy')
+if objcopy_path:
+ config.substitutions.append(('%{objcopy}', objcopy_path))
+ config.available_features.add('objcopy-available')
diff --git a/libunwind/test/eh_frame_fde_pc_range.pass.cpp b/libunwind/test/eh_frame_fde_pc_range.pass.cpp
index 39c8e80..852612b 100644
--- a/libunwind/test/eh_frame_fde_pc_range.pass.cpp
+++ b/libunwind/test/eh_frame_fde_pc_range.pass.cpp
@@ -14,16 +14,15 @@
// clang-format off
// REQUIRES: target={{x86_64-.+-linux-gnu}}
-// aarch64,arm have a cross toolchain build(llvm-clang-win-x-aarch64, etc)
-// where objdump is not available.
+// REQUIRES: objcopy-available
// TODO: Figure out why this fails with Memory Sanitizer.
// XFAIL: msan
// RUN: %{build}
-// RUN: objcopy --dump-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe
+// RUN: %{objcopy} --dump-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe
// RUN: echo -ne '\xFF' | dd of=%t_ehf_hdr.bin bs=1 seek=2 count=2 conv=notrunc status=none
-// RUN: objcopy --update-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe
+// RUN: %{objcopy} --update-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe
// RUN: %{exec} %t.exe
// clang-format on
diff --git a/lld/COFF/Driver.cpp b/lld/COFF/Driver.cpp
index a59cc06..3676b88 100644
--- a/lld/COFF/Driver.cpp
+++ b/lld/COFF/Driver.cpp
@@ -2104,18 +2104,18 @@ void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
config->dtltoDistributor = args.getLastArgValue(OPT_thinlto_distributor);
// Handle /thinlto-distributor-arg:<arg>
- for (auto *arg : args.filtered(OPT_thinlto_distributor_arg))
- config->dtltoDistributorArgs.push_back(arg->getValue());
+ config->dtltoDistributorArgs =
+ args::getStrings(args, OPT_thinlto_distributor_arg);
// Handle /thinlto-remote-compiler:<path>
- config->dtltoCompiler = args.getLastArgValue(OPT_thinlto_compiler);
+ config->dtltoCompiler = args.getLastArgValue(OPT_thinlto_remote_compiler);
if (!config->dtltoDistributor.empty() && config->dtltoCompiler.empty())
Err(ctx) << "A value must be specified for /thinlto-remote-compiler if "
"/thinlto-distributor is specified.";
// Handle /thinlto-remote-compiler-arg:<arg>
- for (auto *arg : args.filtered(OPT_thinlto_compiler_arg))
- config->dtltoCompilerArgs.push_back(arg->getValue());
+ config->dtltoCompilerArgs =
+ args::getStrings(args, OPT_thinlto_remote_compiler_arg);
// Handle /dwodir
config->dwoDir = args.getLastArgValue(OPT_dwodir);
diff --git a/lld/COFF/Options.td b/lld/COFF/Options.td
index 485db5a..f3d0eb3 100644
--- a/lld/COFF/Options.td
+++ b/lld/COFF/Options.td
@@ -289,10 +289,10 @@ def thinlto_distributor : P<"thinlto-distributor",
"backend compilations will be distributed">;
def thinlto_distributor_arg : P<"thinlto-distributor-arg",
"Arguments to pass to the ThinLTO distributor">;
-def thinlto_compiler : P<"thinlto-remote-compiler",
+def thinlto_remote_compiler : P<"thinlto-remote-compiler",
"Compiler for the ThinLTO distributor to invoke for ThinLTO backend "
"compilations">;
-def thinlto_compiler_arg : P<"thinlto-remote-compiler-arg",
+def thinlto_remote_compiler_arg : P<"thinlto-remote-compiler-arg",
"Compiler arguments for the ThinLTO distributor to pass for ThinLTO backend "
"compilations">;
def lto_obj_path : P<
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 1beab8d..62f7fff 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -1399,8 +1399,9 @@ static void readConfigs(Ctx &ctx, opt::InputArgList &args) {
ctx.arg.dtltoDistributor = args.getLastArgValue(OPT_thinlto_distributor_eq);
ctx.arg.dtltoDistributorArgs =
args::getStrings(args, OPT_thinlto_distributor_arg);
- ctx.arg.dtltoCompiler = args.getLastArgValue(OPT_thinlto_compiler_eq);
- ctx.arg.dtltoCompilerArgs = args::getStrings(args, OPT_thinlto_compiler_arg);
+ ctx.arg.dtltoCompiler = args.getLastArgValue(OPT_thinlto_remote_compiler_eq);
+ ctx.arg.dtltoCompilerArgs =
+ args::getStrings(args, OPT_thinlto_remote_compiler_arg);
ctx.arg.dwoDir = args.getLastArgValue(OPT_plugin_opt_dwo_dir_eq);
ctx.arg.dynamicLinker = getDynamicLinker(ctx, args);
ctx.arg.ehFrameHdr =
diff --git a/lld/ELF/Options.td b/lld/ELF/Options.td
index f052318..0d6dda4 100644
--- a/lld/ELF/Options.td
+++ b/lld/ELF/Options.td
@@ -722,11 +722,11 @@ def thinlto_distributor_eq: JJ<"thinlto-distributor=">,
"ThinLTO backend compilations will be distributed">;
defm thinlto_distributor_arg: EEq<"thinlto-distributor-arg", "Arguments to "
"pass to the ThinLTO distributor">;
-def thinlto_compiler_eq: JJ<"thinlto-remote-compiler=">,
+def thinlto_remote_compiler_eq: JJ<"thinlto-remote-compiler=">,
HelpText<"Compiler for the ThinLTO distributor to invoke for ThinLTO backend "
"compilations">;
-defm thinlto_compiler_arg: EEq<"thinlto-remote-compiler-arg", "Compiler "
- "arguments for the ThinLTO distributor to pass for ThinLTO backend "
+defm thinlto_remote_compiler_arg: EEq<"thinlto-remote-compiler-arg",
+ "Compiler arguments for the ThinLTO distributor to pass for ThinLTO backend "
"compilations">;
defm fat_lto_objects: BB<"fat-lto-objects",
"Use the .llvm.lto section, which contains LLVM bitcode, in fat LTO object files to perform LTO.",
diff --git a/lldb/include/lldb/Core/Mangled.h b/lldb/include/lldb/Core/Mangled.h
index 47f1c6a8..546d7a9b 100644
--- a/lldb/include/lldb/Core/Mangled.h
+++ b/lldb/include/lldb/Core/Mangled.h
@@ -148,13 +148,7 @@ public:
/// Mangled name get accessor.
///
/// \return
- /// A reference to the mangled name string object.
- ConstString &GetMangledName() { return m_mangled; }
-
- /// Mangled name get accessor.
- ///
- /// \return
- /// A const reference to the mangled name string object.
+ /// The mangled name string object.
ConstString GetMangledName() const { return m_mangled; }
/// Best name get accessor.
@@ -251,7 +245,7 @@ public:
/// \return
/// eManglingSchemeNone if no known mangling scheme could be identified
/// for s, otherwise the enumerator for the mangling scheme detected.
- static Mangled::ManglingScheme GetManglingScheme(llvm::StringRef const name);
+ static Mangled::ManglingScheme GetManglingScheme(llvm::StringRef name);
static bool IsMangledName(llvm::StringRef name);
diff --git a/lldb/include/lldb/Target/Statistics.h b/lldb/include/lldb/Target/Statistics.h
index d6983bb..26538352 100644
--- a/lldb/include/lldb/Target/Statistics.h
+++ b/lldb/include/lldb/Target/Statistics.h
@@ -322,12 +322,14 @@ public:
void IncreaseSourceRealpathCompatibleCount(uint32_t count);
StatsDuration &GetCreateTime() { return m_create_time; }
+ StatsDuration &GetLoadCoreTime() { return m_load_core_time; }
StatsSuccessFail &GetExpressionStats() { return m_expr_eval; }
StatsSuccessFail &GetFrameVariableStats() { return m_frame_var; }
void Reset(Target &target);
protected:
StatsDuration m_create_time;
+ StatsDuration m_load_core_time;
std::optional<StatsTimepoint> m_launch_or_attach_time;
std::optional<StatsTimepoint> m_first_private_stop_time;
std::optional<StatsTimepoint> m_first_public_stop_time;
diff --git a/lldb/packages/Python/lldbsuite/test/cpu_feature.py b/lldb/packages/Python/lldbsuite/test/cpu_feature.py
index b46a5ac..d7668c1 100644
--- a/lldb/packages/Python/lldbsuite/test/cpu_feature.py
+++ b/lldb/packages/Python/lldbsuite/test/cpu_feature.py
@@ -62,7 +62,7 @@ class CPUFeature:
class AArch64:
FPMR = CPUFeature("fpmr")
GCS = CPUFeature("gcs")
- MTE = CPUFeature("mte")
+ MTE = CPUFeature("mte", "hw.optional.arm.FEAT_MTE4")
MTE_STORE_ONLY = CPUFeature("mtestoreonly")
PTR_AUTH = CPUFeature("paca", "hw.optional.arm.FEAT_PAuth2")
SME = CPUFeature("sme", "hw.optional.arm.FEAT_SME")
diff --git a/lldb/source/API/SBTarget.cpp b/lldb/source/API/SBTarget.cpp
index eb56337..0d03250 100644
--- a/lldb/source/API/SBTarget.cpp
+++ b/lldb/source/API/SBTarget.cpp
@@ -255,6 +255,7 @@ SBProcess SBTarget::LoadCore(const char *core_file, lldb::SBError &error) {
ProcessSP process_sp(target_sp->CreateProcess(
target_sp->GetDebugger().GetListener(), "", &filespec, false));
if (process_sp) {
+ ElapsedTime load_core_time(target_sp->GetStatistics().GetLoadCoreTime());
error.SetError(process_sp->LoadCore());
if (error.Success())
sb_process.SetSP(process_sp);
diff --git a/lldb/source/Commands/CommandObjectTarget.cpp b/lldb/source/Commands/CommandObjectTarget.cpp
index 940be42..c59d028 100644
--- a/lldb/source/Commands/CommandObjectTarget.cpp
+++ b/lldb/source/Commands/CommandObjectTarget.cpp
@@ -418,7 +418,11 @@ protected:
if (process_sp) {
// Seems weird that we Launch a core file, but that is what we
// do!
- error = process_sp->LoadCore();
+ {
+ ElapsedTime load_core_time(
+ target_sp->GetStatistics().GetLoadCoreTime());
+ error = process_sp->LoadCore();
+ }
if (error.Fail()) {
result.AppendError(error.AsCString("unknown core file format"));
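
ElapsedTime here is lldb's RAII accumulator: constructing it records a start time, and its destructor adds the scope's duration to the StatsDuration it references, which is why wrapping LoadCore() in a block is the entire instrumentation. A rough standalone sketch of the idiom (not lldb's actual class):

    #include <chrono>

    // Accumulates the lifetime of each instance into a shared total, in the
    // spirit of lldb's ElapsedTime / StatsDuration pair.
    class ScopedElapsedTime {
    public:
      using Clock = std::chrono::steady_clock;
      explicit ScopedElapsedTime(std::chrono::duration<double> &total)
          : m_total(total), m_start(Clock::now()) {}
      ~ScopedElapsedTime() { m_total += Clock::now() - m_start; }

    private:
      std::chrono::duration<double> &m_total;
      Clock::time_point m_start;
    };

    // Usage mirroring the patch: only the load-the-core work is timed.
    //   {
    //     ScopedElapsedTime timer(load_core_time);
    //     // ... the process->LoadCore() equivalent ...
    //   }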
diff --git a/lldb/source/Core/Mangled.cpp b/lldb/source/Core/Mangled.cpp
index 91b9c00..0780846 100644
--- a/lldb/source/Core/Mangled.cpp
+++ b/lldb/source/Core/Mangled.cpp
@@ -40,7 +40,7 @@ bool Mangled::IsMangledName(llvm::StringRef name) {
return Mangled::GetManglingScheme(name) != Mangled::eManglingSchemeNone;
}
-Mangled::ManglingScheme Mangled::GetManglingScheme(llvm::StringRef const name) {
+Mangled::ManglingScheme Mangled::GetManglingScheme(llvm::StringRef name) {
if (name.empty())
return Mangled::eManglingSchemeNone;
diff --git a/lldb/source/Expression/IRExecutionUnit.cpp b/lldb/source/Expression/IRExecutionUnit.cpp
index 25d4a87..60b9de0 100644
--- a/lldb/source/Expression/IRExecutionUnit.cpp
+++ b/lldb/source/Expression/IRExecutionUnit.cpp
@@ -751,7 +751,12 @@ ResolveFunctionCallLabel(FunctionCallLabel &label,
sc_list.Append(*sc_or_err);
LoadAddressResolver resolver(*sc.target_sp, symbol_was_missing_weak);
- return resolver.Resolve(sc_list).value_or(LLDB_INVALID_ADDRESS);
+ lldb::addr_t resolved_addr =
+ resolver.Resolve(sc_list).value_or(LLDB_INVALID_ADDRESS);
+ if (resolved_addr == LLDB_INVALID_ADDRESS)
+ return llvm::createStringError("couldn't resolve address for function");
+
+ return resolved_addr;
}
lldb::addr_t
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
index 924953c..3c49c91 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
@@ -792,7 +792,7 @@ ClangExpressionParser::ClangExpressionParser(
// 6. Set up the source management objects inside the compiler
m_compiler->createFileManager();
if (!m_compiler->hasSourceManager())
- m_compiler->createSourceManager(m_compiler->getFileManager());
+ m_compiler->createSourceManager();
m_compiler->createPreprocessor(TU_Complete);
switch (expr.Language().AsLanguageType()) {
diff --git a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
index 4e8a430..a2199cb 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
@@ -104,10 +104,10 @@ CPlusPlusLanguage::GetFunctionNameInfo(ConstString name) const {
}
bool CPlusPlusLanguage::SymbolNameFitsToLanguage(Mangled mangled) const {
- const char *mangled_name = mangled.GetMangledName().GetCString();
- auto mangling_scheme = Mangled::GetManglingScheme(mangled_name);
- return mangled_name && (mangling_scheme == Mangled::eManglingSchemeItanium ||
- mangling_scheme == Mangled::eManglingSchemeMSVC);
+ auto mangling_scheme =
+ Mangled::GetManglingScheme(mangled.GetMangledName().GetStringRef());
+ return mangling_scheme == Mangled::eManglingSchemeItanium ||
+ mangling_scheme == Mangled::eManglingSchemeMSVC;
}
ConstString CPlusPlusLanguage::GetDemangledFunctionNameWithoutArguments(
diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
index fada1fd..9cdb846 100644
--- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
+++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
@@ -2067,6 +2067,43 @@ static bool ParseTrieEntries(DataExtractor &data, lldb::offset_t offset,
return true;
}
+static bool
+TryParseV2ObjCMetadataSymbol(const char *&symbol_name,
+ const char *&symbol_name_non_abi_mangled,
+ SymbolType &type) {
+ static constexpr llvm::StringLiteral g_objc_v2_prefix_class("_OBJC_CLASS_$_");
+ static constexpr llvm::StringLiteral g_objc_v2_prefix_metaclass(
+ "_OBJC_METACLASS_$_");
+ static constexpr llvm::StringLiteral g_objc_v2_prefix_ivar("_OBJC_IVAR_$_");
+
+ llvm::StringRef symbol_name_ref(symbol_name);
+ if (symbol_name_ref.empty())
+ return false;
+
+ if (symbol_name_ref.starts_with(g_objc_v2_prefix_class)) {
+ symbol_name_non_abi_mangled = symbol_name + 1;
+ symbol_name = symbol_name + g_objc_v2_prefix_class.size();
+ type = eSymbolTypeObjCClass;
+ return true;
+ }
+
+ if (symbol_name_ref.starts_with(g_objc_v2_prefix_metaclass)) {
+ symbol_name_non_abi_mangled = symbol_name + 1;
+ symbol_name = symbol_name + g_objc_v2_prefix_metaclass.size();
+ type = eSymbolTypeObjCMetaClass;
+ return true;
+ }
+
+ if (symbol_name_ref.starts_with(g_objc_v2_prefix_ivar)) {
+ symbol_name_non_abi_mangled = symbol_name + 1;
+ symbol_name = symbol_name + g_objc_v2_prefix_ivar.size();
+ type = eSymbolTypeObjCIVar;
+ return true;
+ }
+
+ return false;
+}
+
static SymbolType GetSymbolType(const char *&symbol_name,
bool &demangled_is_synthesized,
const SectionSP &text_section_sp,
@@ -2183,9 +2220,6 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) {
lldb::offset_t offset = MachHeaderSizeFromMagic(m_header.magic);
uint32_t i;
FileSpecList dylib_files;
- llvm::StringRef g_objc_v2_prefix_class("_OBJC_CLASS_$_");
- llvm::StringRef g_objc_v2_prefix_metaclass("_OBJC_METACLASS_$_");
- llvm::StringRef g_objc_v2_prefix_ivar("_OBJC_IVAR_$_");
UUID image_uuid;
for (i = 0; i < m_header.ncmds; ++i) {
@@ -2805,36 +2839,15 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) {
is_gsym = true;
sym[sym_idx].SetExternal(true);
- if (symbol_name && symbol_name[0] == '_' &&
- symbol_name[1] == 'O') {
- llvm::StringRef symbol_name_ref(symbol_name);
- if (symbol_name_ref.starts_with(
- g_objc_v2_prefix_class)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name =
- symbol_name + g_objc_v2_prefix_class.size();
- type = eSymbolTypeObjCClass;
- demangled_is_synthesized = true;
-
- } else if (symbol_name_ref.starts_with(
- g_objc_v2_prefix_metaclass)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name =
- symbol_name + g_objc_v2_prefix_metaclass.size();
- type = eSymbolTypeObjCMetaClass;
- demangled_is_synthesized = true;
- } else if (symbol_name_ref.starts_with(
- g_objc_v2_prefix_ivar)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name =
- symbol_name + g_objc_v2_prefix_ivar.size();
- type = eSymbolTypeObjCIVar;
- demangled_is_synthesized = true;
- }
+ if (TryParseV2ObjCMetadataSymbol(
+ symbol_name, symbol_name_non_abi_mangled,
+ type)) {
+ demangled_is_synthesized = true;
} else {
if (nlist.n_value != 0)
symbol_section = section_info.GetSection(
nlist.n_sect, nlist.n_value);
+
type = eSymbolTypeData;
}
break;
@@ -3320,48 +3333,10 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) {
symbol_sect_name) {
type = eSymbolTypeRuntime;
- if (symbol_name) {
- llvm::StringRef symbol_name_ref(symbol_name);
- if (symbol_name_ref.starts_with("_OBJC_")) {
- llvm::StringRef
- g_objc_v2_prefix_class(
- "_OBJC_CLASS_$_");
- llvm::StringRef
- g_objc_v2_prefix_metaclass(
- "_OBJC_METACLASS_$_");
- llvm::StringRef
- g_objc_v2_prefix_ivar("_OBJC_IVAR_$_");
- if (symbol_name_ref.starts_with(
- g_objc_v2_prefix_class)) {
- symbol_name_non_abi_mangled =
- symbol_name + 1;
- symbol_name =
- symbol_name +
- g_objc_v2_prefix_class.size();
- type = eSymbolTypeObjCClass;
- demangled_is_synthesized = true;
- } else if (
- symbol_name_ref.starts_with(
- g_objc_v2_prefix_metaclass)) {
- symbol_name_non_abi_mangled =
- symbol_name + 1;
- symbol_name =
- symbol_name +
- g_objc_v2_prefix_metaclass.size();
- type = eSymbolTypeObjCMetaClass;
- demangled_is_synthesized = true;
- } else if (symbol_name_ref.starts_with(
- g_objc_v2_prefix_ivar)) {
- symbol_name_non_abi_mangled =
- symbol_name + 1;
- symbol_name =
- symbol_name +
- g_objc_v2_prefix_ivar.size();
- type = eSymbolTypeObjCIVar;
- demangled_is_synthesized = true;
- }
- }
- }
+ if (TryParseV2ObjCMetadataSymbol(
+ symbol_name,
+ symbol_name_non_abi_mangled, type))
+ demangled_is_synthesized = true;
} else if (symbol_sect_name &&
::strstr(symbol_sect_name,
"__gcc_except_tab") ==
@@ -3652,7 +3627,7 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) {
if (is_debug) {
switch (nlist.n_type) {
- case N_GSYM:
+ case N_GSYM: {
// global symbol: name,,NO_SECT,type,0
// Sometimes the N_GSYM value contains the address.
@@ -3668,33 +3643,17 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) {
is_gsym = true;
sym[sym_idx].SetExternal(true);
- if (symbol_name && symbol_name[0] == '_' && symbol_name[1] == 'O') {
- llvm::StringRef symbol_name_ref(symbol_name);
- if (symbol_name_ref.starts_with(g_objc_v2_prefix_class)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name = symbol_name + g_objc_v2_prefix_class.size();
- type = eSymbolTypeObjCClass;
- demangled_is_synthesized = true;
-
- } else if (symbol_name_ref.starts_with(
- g_objc_v2_prefix_metaclass)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name = symbol_name + g_objc_v2_prefix_metaclass.size();
- type = eSymbolTypeObjCMetaClass;
- demangled_is_synthesized = true;
- } else if (symbol_name_ref.starts_with(g_objc_v2_prefix_ivar)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name = symbol_name + g_objc_v2_prefix_ivar.size();
- type = eSymbolTypeObjCIVar;
- demangled_is_synthesized = true;
- }
+ if (TryParseV2ObjCMetadataSymbol(symbol_name,
+ symbol_name_non_abi_mangled, type)) {
+ demangled_is_synthesized = true;
} else {
if (nlist.n_value != 0)
symbol_section =
section_info.GetSection(nlist.n_sect, nlist.n_value);
+
type = eSymbolTypeData;
}
- break;
+ } break;
case N_FNAME:
// procedure name (f77 kludge): name,,NO_SECT,0,0
@@ -4130,38 +4089,9 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) {
::strstr(symbol_sect_name, "__objc") == symbol_sect_name) {
type = eSymbolTypeRuntime;
- if (symbol_name) {
- llvm::StringRef symbol_name_ref(symbol_name);
- if (symbol_name_ref.starts_with("_OBJC_")) {
- llvm::StringRef g_objc_v2_prefix_class(
- "_OBJC_CLASS_$_");
- llvm::StringRef g_objc_v2_prefix_metaclass(
- "_OBJC_METACLASS_$_");
- llvm::StringRef g_objc_v2_prefix_ivar(
- "_OBJC_IVAR_$_");
- if (symbol_name_ref.starts_with(g_objc_v2_prefix_class)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name =
- symbol_name + g_objc_v2_prefix_class.size();
- type = eSymbolTypeObjCClass;
- demangled_is_synthesized = true;
- } else if (symbol_name_ref.starts_with(
- g_objc_v2_prefix_metaclass)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name =
- symbol_name + g_objc_v2_prefix_metaclass.size();
- type = eSymbolTypeObjCMetaClass;
- demangled_is_synthesized = true;
- } else if (symbol_name_ref.starts_with(
- g_objc_v2_prefix_ivar)) {
- symbol_name_non_abi_mangled = symbol_name + 1;
- symbol_name =
- symbol_name + g_objc_v2_prefix_ivar.size();
- type = eSymbolTypeObjCIVar;
- demangled_is_synthesized = true;
- }
- }
- }
+ if (TryParseV2ObjCMetadataSymbol(
+ symbol_name, symbol_name_non_abi_mangled, type))
+ demangled_is_synthesized = true;
} else if (symbol_sect_name &&
::strstr(symbol_sect_name, "__gcc_except_tab") ==
symbol_sect_name) {
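
A worked example of the rewriting the new TryParseV2ObjCMetadataSymbol helper performs (illustrative only, following the helper's code as added above):

    const char *name = "_OBJC_CLASS_$_NSString";
    const char *non_abi_mangled = nullptr;
    SymbolType type = eSymbolTypeInvalid;
    if (TryParseV2ObjCMetadataSymbol(name, non_abi_mangled, type)) {
      // non_abi_mangled -> "OBJC_CLASS_$_NSString" (only the leading '_' skipped)
      // name            -> "NSString"              (the full prefix stripped)
      // type            -> eSymbolTypeObjCClass
    }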
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index a5aaf1f..21c265e 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -960,6 +960,12 @@ CompilerType TypeSystemClang::GetBuiltinTypeForDWARFEncodingAndBitSize(
if (type_name == "long double" &&
QualTypeMatchesBitSize(bit_size, ast, ast.LongDoubleTy))
return GetType(ast.LongDoubleTy);
+ if (type_name == "__bf16" &&
+ QualTypeMatchesBitSize(bit_size, ast, ast.BFloat16Ty))
+ return GetType(ast.BFloat16Ty);
+ if (type_name == "_Float16" &&
+ QualTypeMatchesBitSize(bit_size, ast, ast.Float16Ty))
+ return GetType(ast.Float16Ty);
// As Rust currently uses `TypeSystemClang`, match `f128` here as well so it
// doesn't get misinterpreted as `long double` on targets where they are
// the same size but different formats.
@@ -1792,6 +1798,8 @@ bool TypeSystemClang::RecordHasFields(const RecordDecl *record_decl) {
for (base_class = cxx_record_decl->bases_begin(),
base_class_end = cxx_record_decl->bases_end();
base_class != base_class_end; ++base_class) {
+ assert(record_decl != base_class->getType()->getAsCXXRecordDecl() &&
+ "Base can't inherit from itself.");
if (RecordHasFields(base_class->getType()->getAsCXXRecordDecl()))
return true;
}
diff --git a/lldb/source/Target/Statistics.cpp b/lldb/source/Target/Statistics.cpp
index 8ad8d50..f7311a8b 100644
--- a/lldb/source/Target/Statistics.cpp
+++ b/lldb/source/Target/Statistics.cpp
@@ -148,6 +148,11 @@ TargetStats::ToJSON(Target &target,
target_metrics_json.try_emplace("targetCreateTime",
m_create_time.get().count());
+ if (m_load_core_time.get().count() > 0) {
+ target_metrics_json.try_emplace("loadCoreTime",
+ m_load_core_time.get().count());
+ }
+
json::Array breakpoints_array;
double totalBreakpointResolveTime = 0.0;
// Report both the normal breakpoint list and the internal breakpoint list.
diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py
index 1e920fa..45f7b5b 100644
--- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py
+++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py
@@ -124,11 +124,6 @@ class StdUnorderedMapDataFormatterTestCase(TestBase):
self.check_ptr_ptr("ptr5")
self.check_ptr_ptr("ptr6")
- @expectedFailureAll(
- bugnumber="https://github.com/llvm/llvm-project/issues/146040",
- compiler="clang",
- compiler_version=["<", "21"],
- )
@add_test_categories(["libc++"])
def test_ptr_libcxx(self):
self.build(dictionary={"USE_LIBCPP": 1})
diff --git a/lldb/test/API/functionalities/json/symbol-file/Makefile b/lldb/test/API/functionalities/json/symbol-file/Makefile
index 13bc164..5d05d95f 100644
--- a/lldb/test/API/functionalities/json/symbol-file/Makefile
+++ b/lldb/test/API/functionalities/json/symbol-file/Makefile
@@ -1,4 +1,5 @@
C_SOURCES := main.c
+CFLAGS_EXTRAS := -no-pie
all: stripped.out
diff --git a/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py b/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py
index f06c9ae..d7249df 100644
--- a/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py
+++ b/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py
@@ -1,6 +1,7 @@
# Test the SBAPI for GetStatistics()
import json
+
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -54,6 +55,11 @@ class TestStatsAPI(TestBase):
stats_json,
'Make sure the "frameVariable" key in in target.GetStatistics()["targets"][0]',
)
+ self.assertNotIn(
+ "loadCoreTime",
+ stats_json,
+ "LoadCoreTime should not be present in a live, non-coredump target",
+ )
expressionEvaluation = stats_json["expressionEvaluation"]
self.assertIn(
"successes",
@@ -157,3 +163,25 @@ class TestStatsAPI(TestBase):
stats_force.GetAsJSON(stream_force)
debug_stats_force = json.loads(stream_force.GetData())
self.assertEqual(debug_stats_force["totalDebugInfoByteSize"], 445)
+
+ def test_core_load_time(self):
+ """
+        Test that the time to load a core file is included in the statistics dump.
+ """
+ yaml_file = "arm64-minidump-build-ids.yaml"
+ src_dir = self.getSourceDir()
+ minidump_path = self.getBuildArtifact(os.path.basename(yaml_file) + ".dmp")
+ self.yaml2obj(os.path.join(src_dir, yaml_file), minidump_path)
+ target = self.dbg.CreateTarget(None)
+ process = target.LoadCore(minidump_path)
+ self.assertTrue(process.IsValid())
+
+ stats_options = lldb.SBStatisticsOptions()
+ stats = target.GetStatistics(stats_options)
+ stream = lldb.SBStream()
+ stats.GetAsJSON(stream)
+ debug_stats = json.loads(stream.GetData())
+ self.assertTrue("targets" in debug_stats)
+ target_info = debug_stats["targets"][0]
+ self.assertTrue("loadCoreTime" in target_info)
+ self.assertTrue(float(target_info["loadCoreTime"]) > 0.0)
diff --git a/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml b/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml
new file mode 100644
index 0000000..4acbc40
--- /dev/null
+++ b/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml
@@ -0,0 +1,19 @@
+--- !minidump
+Streams:
+ - Type: SystemInfo
+ Processor Arch: ARM
+ Platform ID: Linux
+ CSD Version: '15E216'
+ CPU:
+ CPUID: 0x00000000
+ - Type: ModuleList
+ Modules:
+ - Base of Image: 0x0000000000001000
+ Size of Image: 0x00001000
+ Module Name: '/tmp/a'
+ CodeView Record: 4C4570420102030405060708090A0B0C0D0E0F1011121314
+ - Base of Image: 0x0000000000001000
+ Size of Image: 0x00001000
+ Module Name: '/tmp/b'
+ CodeView Record: 4C4570420A141E28323C46505A646E78828C96A0AAB4BEC8
+...
diff --git a/lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py b/lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py
index 87d8adb..2d3e4f7 100644
--- a/lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py
+++ b/lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py
@@ -10,6 +10,11 @@ from lldbsuite.test import lldbutil
class AbiTagStructorsTestCase(TestBase):
+ @skipIf(
+ compiler="clang",
+ compiler_version=["<", "22"],
+ bugnumber="Required Clang flag not supported",
+ )
@expectedFailureAll(oslist=["windows"])
def test_with_structor_linkage_names(self):
self.build(dictionary={"CXXFLAGS_EXTRAS": "-gstructor-decl-linkage-names"})
@@ -73,7 +78,16 @@ class AbiTagStructorsTestCase(TestBase):
Test that without linkage names on structor declarations we can't call
ABI-tagged structors.
"""
- self.build(dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"})
+ # In older versions of Clang the -gno-structor-decl-linkage-names
+ # behaviour was the default.
+ if self.expectedCompiler(["clang"]) and self.expectedCompilerVersion(
+ [">=", "22.0"]
+ ):
+ self.build(
+ dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"}
+ )
+ else:
+ self.build()
lldbutil.run_to_source_breakpoint(
self, "Break here", lldb.SBFileSpec("main.cpp", False)
@@ -105,12 +119,23 @@ class AbiTagStructorsTestCase(TestBase):
"expression TaggedLocal()", error=True, substrs=["Couldn't look up symbols"]
)
+ @skipIf(compiler="clang", compiler_version=["<", "22"])
@expectedFailureAll(oslist=["windows"])
- def test_nested_no_structor_linkage_names(self):
+ def test_nested_with_structor_linkage_names(self):
self.build(dictionary={"CXXFLAGS_EXTRAS": "-gstructor-decl-linkage-names"})
self.do_nested_structor_test()
@expectedFailureAll(oslist=["windows"])
- def test_nested_with_structor_linkage_names(self):
- self.build(dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"})
+ def test_nested_no_structor_linkage_names(self):
+ # In older versions of Clang the -gno-structor-decl-linkage-names
+ # behaviour was the default.
+ if self.expectedCompiler(["clang"]) and self.expectedCompilerVersion(
+ [">=", "22.0"]
+ ):
+ self.build(
+ dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"}
+ )
+ else:
+ self.build()
+
self.do_nested_structor_test()
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py b/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py
index c0545c70..b3bed43 100644
--- a/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py
@@ -6,6 +6,11 @@ from lldbsuite.test import lldbutil
class ExprDefinitionInDylibTestCase(TestBase):
+ @skipIf(
+ compiler="clang",
+ compiler_version=["<", "22"],
+ bugnumber="Required Clang flag not supported",
+ )
@skipIfWindows
def test_with_structor_linkage_names(self):
"""
@@ -74,7 +79,16 @@ class ExprDefinitionInDylibTestCase(TestBase):
Tests that if structor declarations don't have linkage names, we can't
call ABI-tagged constructors. But non-tagged ones are fine.
"""
- self.build(dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"})
+ # In older versions of Clang the -gno-structor-decl-linkage-names
+ # behaviour was the default.
+ if self.expectedCompiler(["clang"]) and self.expectedCompilerVersion(
+ [">=", "22.0"]
+ ):
+ self.build(
+ dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"}
+ )
+ else:
+ self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
@@ -95,6 +109,6 @@ class ExprDefinitionInDylibTestCase(TestBase):
self.expect_expr("Foo(10)", result_type="Foo")
- self.expect("Base()", error=True)
+ self.expect("expr Base()", error=True)
- self.expect("Bar()", error=True)
+ self.expect("expr Bar()", error=True)
diff --git a/lldb/test/API/lang/cpp/floating-types-specialization/Makefile b/lldb/test/API/lang/cpp/floating-types-specialization/Makefile
new file mode 100644
index 0000000..99998b2
--- /dev/null
+++ b/lldb/test/API/lang/cpp/floating-types-specialization/Makefile
@@ -0,0 +1,3 @@
+CXX_SOURCES := main.cpp
+
+include Makefile.rules
diff --git a/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py b/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py
new file mode 100644
index 0000000..f4530cd
--- /dev/null
+++ b/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py
@@ -0,0 +1,36 @@
+import lldb
+import lldbsuite.test.lldbplatformutil as lldbplatformutil
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class TestCase(TestBase):
+ def test(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(
+ self, "// break here", lldb.SBFileSpec("main.cpp", False)
+ )
+
+        # On 32-bit Arm, __bf16 requires the bfloat16 extension, or an FPU while
+        # not using soft-float mode. The target we assume has neither of those,
+        # so instead of __bf16 we get __fp16.
+ is_arm_32_bit = lldbplatformutil.getArchitecture() == "arm"
+
+ self.expect_expr(
+ "f0", result_type=("Foo<__fp16>" if is_arm_32_bit else "Foo<__bf16>")
+ )
+
+        # When __bf16 is actually __fp16, f1 looks like it inherits from itself,
+        # which Clang allows but LLDB fails to evaluate.
+ if not is_arm_32_bit:
+ self.expect_expr("f1", result_type="Foo<__fp16>")
+
+        # Test sizeof to ensure that computing the record layout does not
+        # recurse infinitely.
+        v = self.frame().EvaluateExpression("sizeof(f0)")
+        self.assertGreater(v.GetValueAsUnsigned(), 0)
+
+ if not is_arm_32_bit:
+ v = self.frame().EvaluateExpression("sizeof(f1)")
+            self.assertGreater(v.GetValueAsUnsigned(), 0)
diff --git a/lldb/test/API/lang/cpp/floating-types-specialization/main.cpp b/lldb/test/API/lang/cpp/floating-types-specialization/main.cpp
new file mode 100644
index 0000000..e3e8a37
--- /dev/null
+++ b/lldb/test/API/lang/cpp/floating-types-specialization/main.cpp
@@ -0,0 +1,11 @@
+template <typename T> struct Foo;
+
+template <> struct Foo<__bf16> {};
+
+template <> struct Foo<_Float16> : Foo<__bf16> {};
+
+int main() {
+ Foo<__bf16> f0;
+ Foo<_Float16> f1;
+ return 0; // break here
+}
diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/Makefile b/lldb/test/API/lang/cpp/function-call-from-object-file/Makefile
new file mode 100644
index 0000000..285bbfb
--- /dev/null
+++ b/lldb/test/API/lang/cpp/function-call-from-object-file/Makefile
@@ -0,0 +1,3 @@
+CXX_SOURCES := main.cpp lib1.cpp lib2.cpp
+
+include Makefile.rules
diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/TestFunctionCallFromObjectFile.py b/lldb/test/API/lang/cpp/function-call-from-object-file/TestFunctionCallFromObjectFile.py
new file mode 100644
index 0000000..f0a7aef
--- /dev/null
+++ b/lldb/test/API/lang/cpp/function-call-from-object-file/TestFunctionCallFromObjectFile.py
@@ -0,0 +1,29 @@
+"""
+Tests that we can call functions that have definitions in multiple
+CUs in the debug-info (which is the case for functions defined in headers).
+The linker will most likely de-duplicate the function definitions when linking
+the final executable. On Darwin, this will create a debug-map that LLDB will use
+to fix up object file addresses to addresses in the linked executable. However,
+if we parsed the DIE from an object file whose function definition got stripped
+by the linker, LLDB needs to ensure it can still resolve the corresponding
+function symbol.
+"""
+
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class TestFunctionCallFromObjectFile(TestBase):
+ def test_lib1(self):
+ self.build()
+ lldbutil.run_to_name_breakpoint(self, "lib1_func")
+
+ self.expect_expr("Foo{}.foo()", result_type="int", result_value="15")
+
+ def test_lib2(self):
+ self.build()
+ lldbutil.run_to_name_breakpoint(self, "lib2_func")
+
+ self.expect_expr("Foo{}.foo()", result_type="int", result_value="15")
diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/common.h b/lldb/test/API/lang/cpp/function-call-from-object-file/common.h
new file mode 100644
index 0000000..76e23be
--- /dev/null
+++ b/lldb/test/API/lang/cpp/function-call-from-object-file/common.h
@@ -0,0 +1,8 @@
+#ifndef COMMON_H_IN
+#define COMMON_H_IN
+
+struct Foo {
+ int foo() { return 15; }
+};
+
+#endif // COMMON_H_IN
diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/lib1.cpp b/lldb/test/API/lang/cpp/function-call-from-object-file/lib1.cpp
new file mode 100644
index 0000000..b97bcc1
--- /dev/null
+++ b/lldb/test/API/lang/cpp/function-call-from-object-file/lib1.cpp
@@ -0,0 +1,8 @@
+#include "common.h"
+
+// Parameter "Foo*" forces LLDB to parse "Foo" from the object
+// file that it is stopped in.
+void lib1_func(Foo *) {
+ // Force definition into lib1.o debug-info.
+ Foo{}.foo();
+}
diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/lib2.cpp b/lldb/test/API/lang/cpp/function-call-from-object-file/lib2.cpp
new file mode 100644
index 0000000..2f9d81a
--- /dev/null
+++ b/lldb/test/API/lang/cpp/function-call-from-object-file/lib2.cpp
@@ -0,0 +1,6 @@
+#include "common.h"
+
+void lib2_func(Foo *) {
+ // Force definition into lib2.o debug-info.
+ Foo{}.foo();
+}
diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/main.cpp b/lldb/test/API/lang/cpp/function-call-from-object-file/main.cpp
new file mode 100644
index 0000000..61ca798
--- /dev/null
+++ b/lldb/test/API/lang/cpp/function-call-from-object-file/main.cpp
@@ -0,0 +1,10 @@
+struct Foo;
+
+extern void lib1_func(Foo *);
+extern void lib2_func(Foo *);
+
+int main() {
+ lib1_func(nullptr);
+ lib2_func(nullptr);
+ return 0;
+}
diff --git a/lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py b/lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py
index 5f939ec..882c91d 100644
--- a/lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py
+++ b/lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py
@@ -99,16 +99,21 @@ class TestStructuredBinding(TestBase):
self.expect_expr("ty2", result_value="'z'")
self.expect_expr("tz2", result_value="10")
- self.expect(
- "frame variable",
- substrs=[
- "tx1 =",
- "ty1 =",
- "tz1 =",
- "tx2 =",
- "ty2 =",
- "tz2 =",
- "mp1 =",
- "mp2 =",
- ],
- )
+ # Older versions of Clang marked structured binding variables
+ # as artificial, and thus LLDB wouldn't display them.
+ if self.expectedCompiler(["clang"]) and self.expectedCompilerVersion(
+ [">=", "22.0"]
+ ):
+ self.expect(
+ "frame variable",
+ substrs=[
+ "tx1 =",
+ "ty1 =",
+ "tz1 =",
+ "tx2 =",
+ "ty2 =",
+ "tz2 =",
+ "mp1 =",
+ "mp2 =",
+ ],
+ )
diff --git a/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py b/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py
index eac7b5e..83c0572 100644
--- a/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py
+++ b/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py
@@ -1,4 +1,5 @@
import lldb
+import lldbsuite.test.lldbplatformutil as lldbplatformutil
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
@@ -82,8 +83,12 @@ class TestCase(TestBase):
value = self.expect_expr("temp7", result_type="Foo<__fp16, __fp16>")
self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1))
- value = self.expect_expr("temp8", result_type="Foo<__fp16, __fp16>")
- self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1))
+ # The target we use when evaluating these expressions for Arm leads to there
+ # not being a __bf16 type in the AST so we fall back to __fp16 and evaluating
+ # this fails.
+ if lldbplatformutil.getArchitecture() != "arm":
+ value = self.expect_expr("temp8", result_type="Foo<__bf16, __bf16>")
+ self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1))
value = self.expect_expr("temp9", result_type="Bar<double, 1.200000e+00>")
template_param_value = value.GetType().GetTemplateArgumentValue(target, 1)
diff --git a/lldb/test/API/macosx/mte/Makefile b/lldb/test/API/macosx/mte/Makefile
new file mode 100644
index 0000000..cb20942
--- /dev/null
+++ b/lldb/test/API/macosx/mte/Makefile
@@ -0,0 +1,12 @@
+C_SOURCES := main.c
+
+EXE := uaf_mte
+
+all: uaf_mte sign
+
+include Makefile.rules
+
+sign: mte-entitlements.plist uaf_mte
+ifeq ($(OS),Darwin)
+ codesign -s - -f --entitlements $^
+endif
diff --git a/lldb/test/API/macosx/mte/TestDarwinMTE.py b/lldb/test/API/macosx/mte/TestDarwinMTE.py
new file mode 100644
index 0000000..ef858b1
--- /dev/null
+++ b/lldb/test/API/macosx/mte/TestDarwinMTE.py
@@ -0,0 +1,110 @@
+"""Test MTE Memory Tagging on Apple platforms"""
+
+import lldb
+import re
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+import lldbsuite.test.cpu_feature as cpu_feature
+
+exe_name = "uaf_mte" # Must match Makefile
+
+
+class TestDarwinMTE(TestBase):
+ NO_DEBUG_INFO_TESTCASE = True
+
+ @skipUnlessFeature(cpu_feature.AArch64.MTE)
+ def test_tag_fault(self):
+ self.build()
+ exe = self.getBuildArtifact(exe_name)
+
+ target = self.dbg.CreateTarget(exe)
+ self.assertTrue(target, VALID_TARGET)
+
+ process = target.LaunchSimple(None, None, None)
+ self.assertState(process.GetState(), lldb.eStateStopped, PROCESS_STOPPED)
+
+ self.expect(
+ "thread info",
+ substrs=[
+ "stop reason = EXC_ARM_MTE_TAG_FAULT",
+ "MTE tag mismatch detected",
+ ],
+ )
+
+ @skipUnlessFeature(cpu_feature.AArch64.MTE)
+ def test_memory_region(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(
+ self, "// before free", lldb.SBFileSpec("main.c"), exe_name=exe_name
+ )
+
+ # (lldb) memory region ptr
+ # [0x00000001005ec000-0x00000001009ec000) rw-
+ # memory tagging: enabled
+ # Modified memory (dirty) page list provided, 2 entries.
+ # Dirty pages: 0x1005ec000, 0x1005fc000.
+ self.expect("memory region ptr", substrs=["memory tagging: enabled"])
+
+ @skipUnlessFeature(cpu_feature.AArch64.MTE)
+ def test_memory_read_with_tags(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(
+ self, "// before free", lldb.SBFileSpec("main.c"), exe_name=exe_name
+ )
+
+ # (lldb) memory read ptr-16 ptr+48 --show-tags
+ # 0x7d2c00930: 00 00 00 00 00 00 00 00 d0 e3 a5 0a 02 00 00 00 ................ (tag: 0x3)
+ # 0x7d2c00940: 48 65 6c 6c 6f 00 00 00 00 00 00 00 00 00 00 00 Hello........... (tag: 0xb)
+ # 0x7d2c00950: 57 6f 72 6c 64 00 00 00 00 00 00 00 00 00 00 00 World........... (tag: 0xb)
+ # 0x7d2c00960: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ (tag: 0x9)
+ self.expect(
+ "memory read ptr-16 ptr+48 --show-tags",
+ substrs=[" Hello...........", " World..........."],
+ patterns=[r"(.*\(tag: 0x[0-9a-f]\)\n){4}"],
+ )
+
+ def _parse_pointer_tag(self, output):
+ return re.search(r"Logical tag: (0x[0-9a-f])", output).group(1)
+
+ def _parse_memory_tags(self, output, expected_tag_count):
+ tags = re.findall(r"\): (0x[0-9a-f])", output)
+ self.assertEqual(len(tags), expected_tag_count)
+ return tags
+
+ @skipUnlessFeature(cpu_feature.AArch64.MTE)
+ def test_memory_tag_read(self):
+ self.build()
+ lldbutil.run_to_source_breakpoint(
+ self, "// before free", lldb.SBFileSpec("main.c"), exe_name=exe_name
+ )
+
+ # (lldb) memory tag read ptr-1 ptr+33
+ # Logical tag: 0x5
+ # Allocation tags:
+ # [0x100a65a40, 0x100a65a50): 0xf (mismatch)
+ # [0x100a65a50, 0x100a65a60): 0x5
+ # [0x100a65a60, 0x100a65a70): 0x5
+ # [0x100a65a70, 0x100a65a80): 0x2 (mismatch)
+ self.expect(
+ "memory tag read ptr-1 ptr+33",
+ substrs=["Logical tag: 0x", "Allocation tags:", "(mismatch)"],
+ patterns=[r"(\[.*\): 0x[0-9a-f].*\n){4}"],
+ )
+ output = self.res.GetOutput()
+ self.assertEqual(output.count("(mismatch)"), 2)
+ ptr_tag = self._parse_pointer_tag(output)
+ tags = self._parse_memory_tags(output, 4)
+ self.assertEqual(tags[1], ptr_tag)
+ self.assertEqual(tags[2], ptr_tag)
+ self.assertNotEqual(tags[0], ptr_tag) # Memory that comes before/after
+ self.assertNotEqual(tags[3], ptr_tag) # allocation has different tag.
+
+ # Continue running until MTE fault
+ self.expect("process continue", substrs=["stop reason = EXC_ARM_MTE_TAG_FAULT"])
+
+ self.runCmd("memory tag read ptr-1 ptr+33")
+ output = self.res.GetOutput()
+ self.assertEqual(output.count("(mismatch)"), 4)
+ tags = self._parse_memory_tags(output, 4)
+ self.assertTrue(all(t != ptr_tag for t in tags))
diff --git a/lldb/test/API/macosx/mte/main.c b/lldb/test/API/macosx/mte/main.c
new file mode 100644
index 0000000..f9f6b15
--- /dev/null
+++ b/lldb/test/API/macosx/mte/main.c
@@ -0,0 +1,28 @@
+#include <malloc/malloc.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Produce some recognizable function names in the backtrace.
+const size_t tag_granule = 16;
+static uint8_t *my_malloc(void) { return malloc(2 * tag_granule); }
+static uint8_t *allocate(void) { return my_malloc(); }
+
+static void my_free(void *ptr) { free(ptr); }
+static void deallocate(void *ptr) { my_free(ptr); }
+
+static void touch_memory(uint8_t *ptr) { ptr[7] = 1; } // invalid access
+static void modify(uint8_t *ptr) { touch_memory(ptr); }
+
+int main() {
+ uint8_t *ptr = allocate();
+
+ strncpy((char *)ptr, "Hello", 16);
+ strncpy((char *)ptr + 16, "World", 16);
+
+ deallocate(ptr); // before free
+
+ modify(ptr); // use-after-free
+
+ return 0;
+}
diff --git a/lldb/test/API/macosx/mte/mte-entitlements.plist b/lldb/test/API/macosx/mte/mte-entitlements.plist
new file mode 100644
index 0000000..6de5d56
--- /dev/null
+++ b/lldb/test/API/macosx/mte/mte-entitlements.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>com.apple.security.hardened-process</key>
+ <true/>
+ <key>com.apple.security.hardened-process.checked-allocations</key>
+ <true/>
+</dict>
+</plist>
diff --git a/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c b/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c
new file mode 100644
index 0000000..8f1bb62
--- /dev/null
+++ b/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c
@@ -0,0 +1,35 @@
+// XFAIL: target-windows
+
+// Tests that LLDB correctly parses global symbols
+// starting with 'O'. On some platforms (e.g., Darwin)
+// C symbols are prefixed with a '_'. The LLDB Mach-O
+// parser handles Objective-C metadata symbols starting
+// with '_OBJC' specially. This test ensures that we don't
+// lose track of regular global symbols with a '_O' prefix
+// in the process.
+
+// RUN: %clang_host -c -g -fno-common %s -o %t.o
+// RUN: %clang_host %t.o -o %t.out
+// RUN: %lldb -b -x %t.out \
+// RUN: -o "b 29" \
+// RUN: -o "run" \
+// RUN: -o "p OglobalVar" \
+// RUN: -o "p Oabc" | FileCheck %s
+
+typedef struct {
+ int a;
+} Oabc_t;
+
+Oabc_t Oabc;
+int OglobalVar;
+
+int main(int argc, const char *argv[]) {
+ Oabc.a = 15;
+ OglobalVar = 10;
+ return OglobalVar + Oabc.a;
+}
+
+// CHECK: (lldb) p OglobalVar
+// CHECK: (int) 10
+// CHECK: (lldb) p Oabc
+// CHECK: (Oabc_t) (a = 15)
diff --git a/lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp b/lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp
index 81d643d..beb5ae2 100644
--- a/lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp
+++ b/lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp
@@ -1,4 +1,4 @@
-// REQUIRES: x86
+// REQUIRES: lld, x86
// Test symtab reading
// RUN: %build --compiler=clang-cl --arch=64 --nodefaultlib -o %t.exe -- %s
diff --git a/lldb/test/Shell/lit.cfg.py b/lldb/test/Shell/lit.cfg.py
index 505847f..cdc0cfe 100644
--- a/lldb/test/Shell/lit.cfg.py
+++ b/lldb/test/Shell/lit.cfg.py
@@ -33,7 +33,7 @@ config.test_format = toolchain.ShTestLldb(not use_lit_shell)
# suffixes: A list of file extensions to treat as test files. This is overridden
# by individual lit.local.cfg files in the test subdirectories.
-config.suffixes = [".test", ".cpp", ".s", ".m", ".ll"]
+config.suffixes = [".test", ".cpp", ".s", ".m", ".ll", ".c"]
# excludes: A list of directories to exclude from the testsuite. The 'Inputs'
# subdirectories contain auxiliary inputs for various tests in their parent
diff --git a/lldb/tools/debugserver/source/DNB.cpp b/lldb/tools/debugserver/source/DNB.cpp
index f541134..0cd48d9 100644
--- a/lldb/tools/debugserver/source/DNB.cpp
+++ b/lldb/tools/debugserver/source/DNB.cpp
@@ -1386,6 +1386,16 @@ int DNBProcessMemoryRegionInfo(nub_process_t pid, nub_addr_t addr,
return -1;
}
+nub_bool_t DNBProcessGetMemoryTags(nub_process_t pid, nub_addr_t addr,
+ nub_size_t size,
+ std::vector<uint8_t> &tags) {
+ MachProcessSP procSP;
+ if (GetProcessSP(pid, procSP))
+ return procSP->Task().GetMemoryTags(addr, size, tags);
+
+ return false;
+}
+
std::string DNBProcessGetProfileData(nub_process_t pid,
DNBProfileDataScanType scanType) {
MachProcessSP procSP;
diff --git a/lldb/tools/debugserver/source/DNB.h b/lldb/tools/debugserver/source/DNB.h
index 10d1f68..1f3d539 100644
--- a/lldb/tools/debugserver/source/DNB.h
+++ b/lldb/tools/debugserver/source/DNB.h
@@ -105,6 +105,9 @@ nub_bool_t DNBProcessMemoryDeallocate(nub_process_t pid,
nub_addr_t addr) DNB_EXPORT;
int DNBProcessMemoryRegionInfo(nub_process_t pid, nub_addr_t addr,
DNBRegionInfo *region_info) DNB_EXPORT;
+nub_bool_t DNBProcessGetMemoryTags(nub_process_t pid, nub_addr_t addr,
+ nub_size_t size,
+ std::vector<uint8_t> &tags) DNB_EXPORT;
std::string
DNBProcessGetProfileData(nub_process_t pid,
DNBProfileDataScanType scanType) DNB_EXPORT;
diff --git a/lldb/tools/debugserver/source/DNBDefs.h b/lldb/tools/debugserver/source/DNBDefs.h
index df8ca80..d98399a 100644
--- a/lldb/tools/debugserver/source/DNBDefs.h
+++ b/lldb/tools/debugserver/source/DNBDefs.h
@@ -358,10 +358,11 @@ struct DNBExecutableImageInfo {
struct DNBRegionInfo {
public:
DNBRegionInfo()
- : addr(0), size(0), permissions(0), dirty_pages(), vm_types() {}
+ : addr(0), size(0), permissions(0), flags(), dirty_pages(), vm_types() {}
nub_addr_t addr;
nub_addr_t size;
uint32_t permissions;
+ std::vector<std::string> flags;
std::vector<nub_addr_t> dirty_pages;
std::vector<std::string> vm_types;
};
diff --git a/lldb/tools/debugserver/source/MacOSX/MachTask.h b/lldb/tools/debugserver/source/MacOSX/MachTask.h
index 2284f6b..c4a20b8 100644
--- a/lldb/tools/debugserver/source/MacOSX/MachTask.h
+++ b/lldb/tools/debugserver/source/MacOSX/MachTask.h
@@ -56,6 +56,8 @@ public:
nub_size_t ReadMemory(nub_addr_t addr, nub_size_t size, void *buf);
nub_size_t WriteMemory(nub_addr_t addr, nub_size_t size, const void *buf);
int GetMemoryRegionInfo(nub_addr_t addr, DNBRegionInfo *region_info);
+ nub_bool_t GetMemoryTags(nub_addr_t addr, nub_size_t size,
+ std::vector<uint8_t> &tags);
std::string GetProfileData(DNBProfileDataScanType scanType);
nub_addr_t AllocateMemory(nub_size_t size, uint32_t permissions);
diff --git a/lldb/tools/debugserver/source/MacOSX/MachTask.mm b/lldb/tools/debugserver/source/MacOSX/MachTask.mm
index e2395cf..21156fe 100644
--- a/lldb/tools/debugserver/source/MacOSX/MachTask.mm
+++ b/lldb/tools/debugserver/source/MacOSX/MachTask.mm
@@ -229,6 +229,23 @@ int MachTask::GetMemoryRegionInfo(nub_addr_t addr, DNBRegionInfo *region_info) {
return ret;
}
+//----------------------------------------------------------------------
+// MachTask::GetMemoryTags
+//----------------------------------------------------------------------
+nub_bool_t MachTask::GetMemoryTags(nub_addr_t addr, nub_size_t size,
+ std::vector<uint8_t> &tags) {
+ task_t task = TaskPort();
+ if (task == TASK_NULL)
+ return false;
+
+ bool ok = m_vm_memory.GetMemoryTags(task, addr, size, tags);
+ DNBLogThreadedIf(LOG_MEMORY, "MachTask::GetMemoryTags ( addr = 0x%8.8llx, "
+ "size = 0x%8.8llx ) => %s ( tag count = %llu)",
+ (uint64_t)addr, (uint64_t)size, (ok ? "ok" : "err"),
+ (uint64_t)tags.size());
+ return ok;
+}
+
#define TIME_VALUE_TO_TIMEVAL(a, r) \
do { \
(r)->tv_sec = (a)->seconds; \
diff --git a/lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp b/lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp
index f3aa4d7..bb57245 100644
--- a/lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp
+++ b/lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp
@@ -13,6 +13,7 @@
#include "MachVMMemory.h"
#include "DNBLog.h"
#include "MachVMRegion.h"
+#include <cassert>
#include <dlfcn.h>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
@@ -123,6 +124,7 @@ nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address,
region_info->addr = vmRegion.StartAddress();
region_info->size = vmRegion.GetByteSize();
region_info->permissions = vmRegion.GetDNBPermissions();
+ region_info->flags = vmRegion.GetFlags();
region_info->dirty_pages =
get_dirty_pages(task, vmRegion.StartAddress(), vmRegion.GetByteSize());
region_info->vm_types = vmRegion.GetMemoryTypes();
@@ -150,6 +152,63 @@ nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address,
return true;
}
+// API availability:
+// mach_vm_update_pointers_with_remote_tags() - 26.0
+// VM_OFFSET_LIST_MAX macro - 26.1
+#ifndef VM_OFFSET_LIST_MAX
+#define VM_OFFSET_LIST_MAX 512
+#endif
+using mach_vm_offset_list_t = mach_vm_offset_t *;
+using mach_vm_update_pointers_with_remote_tags_t = kern_return_t(
+ mach_port_name_t target, mach_vm_offset_list_t in_pointer_list,
+ mach_msg_type_number_t in_pointer_listCnt,
+ mach_vm_offset_list_t out_pointer_list,
+ mach_msg_type_number_t *out_pointer_listCnt);
+
+nub_bool_t MachVMMemory::GetMemoryTags(task_t task, nub_addr_t address,
+ nub_size_t size,
+ std::vector<uint8_t> &tags) {
+ static auto mach_vm_update_pointers_with_remote_tags =
+ (mach_vm_update_pointers_with_remote_tags_t *)dlsym(
+ RTLD_DEFAULT, "mach_vm_update_pointers_with_remote_tags");
+ assert(mach_vm_update_pointers_with_remote_tags);
+
+ // Max batch size supported by mach_vm_update_pointers_with_remote_tags.
+ constexpr uint32_t max_ptr_count = VM_OFFSET_LIST_MAX;
+ constexpr uint32_t tag_shift = 56;
+ constexpr nub_addr_t tag_mask =
+ ((nub_addr_t)0x0f << tag_shift); // Lower half of top byte.
+ constexpr uint32_t tag_granule = 16;
+
+ mach_msg_type_number_t ptr_count =
+ (size / tag_granule) + ((size % tag_granule > 0) ? 1 : 0);
+ ptr_count = std::min(ptr_count, max_ptr_count);
+
+ auto ptr_arr = std::make_unique<mach_vm_offset_t[]>(ptr_count);
+ for (size_t i = 0; i < ptr_count; i++)
+ ptr_arr[i] = (address + i * tag_granule);
+
+ mach_msg_type_number_t ptr_count_out = ptr_count;
+ m_err = mach_vm_update_pointers_with_remote_tags(
+ task, ptr_arr.get(), ptr_count, ptr_arr.get(), &ptr_count_out);
+
+ const bool failed = (m_err.Fail() || (ptr_count != ptr_count_out));
+ if (failed || DNBLogCheckLogBit(LOG_MEMORY))
+ m_err.LogThreaded("::mach_vm_update_pointers_with_remote_tags ( task = "
+ "0x%4.4x, ptr_count = %d ) => %i ( ptr_count_out = %d)",
+ task, ptr_count, m_err.Status(), ptr_count_out);
+ if (failed)
+ return false;
+
+ tags.reserve(ptr_count);
+ for (size_t i = 0; i < ptr_count; i++) {
+ nub_addr_t tag = (ptr_arr[i] & tag_mask) >> tag_shift;
+ tags.push_back(tag);
+ }
+
+ return true;
+}
+
static uint64_t GetPhysicalMemory() {
// This doesn't change often at all. No need to poll each time.
static uint64_t physical_memory = 0;
diff --git a/lldb/tools/debugserver/source/MacOSX/MachVMMemory.h b/lldb/tools/debugserver/source/MacOSX/MachVMMemory.h
index 05d2c02..8a76160 100644
--- a/lldb/tools/debugserver/source/MacOSX/MachVMMemory.h
+++ b/lldb/tools/debugserver/source/MacOSX/MachVMMemory.h
@@ -28,6 +28,8 @@ public:
nub_size_t PageSize(task_t task);
nub_bool_t GetMemoryRegionInfo(task_t task, nub_addr_t address,
DNBRegionInfo *region_info);
+ nub_bool_t GetMemoryTags(task_t task, nub_addr_t address, nub_size_t size,
+ std::vector<uint8_t> &tags);
nub_bool_t GetMemoryProfile(DNBProfileDataScanType scanType, task_t task,
struct task_basic_info ti, cpu_type_t cputype,
nub_process_t pid, vm_statistics64_data_t &vminfo,
diff --git a/lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp b/lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp
index 97908b4..9d0d60f 100644
--- a/lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp
+++ b/lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp
@@ -114,6 +114,11 @@ bool MachVMRegion::RestoreProtections() {
return false;
}
+#ifdef VM_REGION_FLAG_JIT_ENABLED
+#define VM_REGION_HAS_FLAGS 1
+#else
+#define VM_REGION_HAS_FLAGS 0
+#endif
bool MachVMRegion::GetRegionForAddress(nub_addr_t addr) {
// Restore any original protections and clear our vars
Clear();
@@ -140,6 +145,30 @@ bool MachVMRegion::GetRegionForAddress(nub_addr_t addr) {
if (failed)
return false;
if (log_protections) {
+#if VM_REGION_HAS_FLAGS
+ DNBLogThreaded("info = { prot = %u, "
+ "max_prot = %u, "
+ "inheritance = 0x%8.8x, "
+ "offset = 0x%8.8llx, "
+ "user_tag = 0x%8.8x, "
+ "ref_count = %u, "
+ "shadow_depth = %u, "
+ "ext_pager = %u, "
+ "share_mode = %u, "
+ "is_submap = %d, "
+ "behavior = %d, "
+ "object_id = 0x%8.8x, "
+ "user_wired_count = 0x%4.4x, "
+ "flags = %d }",
+ m_data.protection, m_data.max_protection, m_data.inheritance,
+ (uint64_t)m_data.offset, m_data.user_tag, m_data.ref_count,
+ m_data.shadow_depth, m_data.external_pager,
+ m_data.share_mode, m_data.is_submap, m_data.behavior,
+ m_data.object_id, m_data.user_wired_count, m_data.flags);
+#else
+ // Duplicate log call instead of #if-defing printing of flags to avoid
+ // compiler warning: 'embedding a directive within macro arguments has
+ // undefined behavior'
DNBLogThreaded("info = { prot = %u, "
"max_prot = %u, "
"inheritance = 0x%8.8x, "
@@ -158,6 +187,7 @@ bool MachVMRegion::GetRegionForAddress(nub_addr_t addr) {
m_data.shadow_depth, m_data.external_pager,
m_data.share_mode, m_data.is_submap, m_data.behavior,
m_data.object_id, m_data.user_wired_count);
+#endif
}
m_curr_protection = m_data.protection;
@@ -183,6 +213,22 @@ uint32_t MachVMRegion::GetDNBPermissions() const {
return dnb_permissions;
}
+#ifndef VM_REGION_FLAG_MTE_ENABLED
+#define VM_REGION_FLAG_MTE_ENABLED 0x4
+#endif
+std::vector<std::string> MachVMRegion::GetFlags() const {
+ std::vector<std::string> flags;
+#if VM_REGION_HAS_FLAGS
+ if (m_data.flags & VM_REGION_FLAG_JIT_ENABLED)
+ flags.push_back("jit");
+ if (m_data.flags & VM_REGION_FLAG_TPRO_ENABLED)
+ flags.push_back("tpro");
+ if (m_data.flags & VM_REGION_FLAG_MTE_ENABLED)
+ flags.push_back("mt");
+#endif
+ return flags;
+}
+
std::vector<std::string> MachVMRegion::GetMemoryTypes() const {
std::vector<std::string> types;
if (m_data.user_tag == VM_MEMORY_STACK) {
diff --git a/lldb/tools/debugserver/source/MacOSX/MachVMRegion.h b/lldb/tools/debugserver/source/MacOSX/MachVMRegion.h
index cb77058..ba6e1f3 100644
--- a/lldb/tools/debugserver/source/MacOSX/MachVMRegion.h
+++ b/lldb/tools/debugserver/source/MacOSX/MachVMRegion.h
@@ -40,9 +40,10 @@ public:
vm_prot_t prot);
bool RestoreProtections();
bool GetRegionForAddress(nub_addr_t addr);
- std::vector<std::string> GetMemoryTypes() const;
uint32_t GetDNBPermissions() const;
+ std::vector<std::string> GetFlags() const;
+ std::vector<std::string> GetMemoryTypes() const;
const DNBError &GetError() { return m_err; }
diff --git a/lldb/tools/debugserver/source/RNBRemote.cpp b/lldb/tools/debugserver/source/RNBRemote.cpp
index d9fb22c..434e9cf 100644
--- a/lldb/tools/debugserver/source/RNBRemote.cpp
+++ b/lldb/tools/debugserver/source/RNBRemote.cpp
@@ -22,6 +22,9 @@
#include <mach/mach_vm.h>
#include <mach/task_info.h>
#include <memory>
+#if __has_include(<os/security_config.h>)
+#include <os/security_config.h>
+#endif
#include <pwd.h>
#include <string>
#include <sys/stat.h>
@@ -502,6 +505,8 @@ void RNBRemote::CreatePacketTable() {
memory_region_info, &RNBRemote::HandlePacket_MemoryRegionInfo, NULL,
"qMemoryRegionInfo", "Return size and attributes of a memory region that "
"contains the given address"));
+ t.push_back(Packet(get_memory_tags, &RNBRemote::HandlePacket_qMemTags, NULL,
+ "qMemTags", "Return tags for a region of memory"));
t.push_back(Packet(get_profile_data, &RNBRemote::HandlePacket_GetProfileData,
NULL, "qGetProfileData",
"Return profiling data of the current target."));
@@ -3475,6 +3480,18 @@ static bool GetProcessNameFrom_vAttach(const char *&p,
return return_val;
}
+static bool supports_memory_tagging() {
+ const char *name = "hw.optional.arm.FEAT_MTE4";
+ uint32_t val;
+ size_t len = sizeof(val);
+ int ret = ::sysctlbyname(name, &val, &len, nullptr, 0);
+ if (ret != 0)
+ return false;
+
+ assert(len == sizeof(val));
+ return val;
+}
+
rnb_err_t RNBRemote::HandlePacket_qSupported(const char *p) {
uint32_t max_packet_size = 128 * 1024; // 128 KiB is a reasonable max packet
// size--debugger can always use less
@@ -3505,6 +3522,9 @@ rnb_err_t RNBRemote::HandlePacket_qSupported(const char *p) {
reply << "SupportedWatchpointTypes=x86_64;";
#endif
+ if (supports_memory_tagging())
+ reply << "memory-tagging+;";
+
return SendPacket(reply.str().c_str());
}
@@ -4251,7 +4271,6 @@ rnb_err_t RNBRemote::HandlePacket_MemoryRegionInfo(const char *p) {
is in unmapped memory
Region lookup cannot be performed on this platform or process is not
yet launched
- This packet isn't implemented
Examples of use:
qMemoryRegionInfo:3a55140
@@ -4303,6 +4322,16 @@ rnb_err_t RNBRemote::HandlePacket_MemoryRegionInfo(const char *p) {
ostrm << 'x';
ostrm << ';';
+ if (!region_info.flags.empty()) {
+ ostrm << "flags:";
+ for (size_t i = 0; i < region_info.flags.size(); i++) {
+ if (i != 0)
+ ostrm << " "; // Separator is whitespace
+ ostrm << region_info.flags[i];
+ }
+ ostrm << ";";
+ }
+
ostrm << "dirty-pages:";
if (region_info.dirty_pages.size() > 0) {
bool first = true;
@@ -4327,6 +4356,62 @@ rnb_err_t RNBRemote::HandlePacket_MemoryRegionInfo(const char *p) {
return SendPacket(ostrm.str());
}
+// qMemTags:<hex address>,<hex length>:<hex type>
+rnb_err_t RNBRemote::HandlePacket_qMemTags(const char *p) {
+ nub_process_t pid = m_ctx.ProcessID();
+ if (pid == INVALID_NUB_PROCESS)
+ return SendPacket("OK");
+
+ StdStringExtractor packet(p);
+ packet.SetFilePos(strlen("qMemTags:"));
+
+ // Address
+ nub_addr_t addr =
+ packet.GetHexMaxU64(StdStringExtractor::BigEndian, INVALID_NUB_ADDRESS);
+ if (addr == INVALID_NUB_ADDRESS)
+ return HandlePacket_ILLFORMED(__FILE__, __LINE__, p,
+ "Invalid/missing address in qMemTags packet");
+ // ,
+ if (packet.GetChar() != ',')
+ return HandlePacket_ILLFORMED(__FILE__, __LINE__, p,
+ "Invalid qMemTags packet format");
+ // Length
+ uint64_t length = packet.GetHexMaxU64(StdStringExtractor::BigEndian, 0);
+ if (length == 0)
+ return HandlePacket_ILLFORMED(__FILE__, __LINE__, p,
+ "Invalid/missing length in qMemTags packet");
+ // :
+ if (packet.GetChar() != ':')
+ return HandlePacket_ILLFORMED(__FILE__, __LINE__, p,
+ "Invalid qMemTags packet format");
+ // Type
+  // On the LLDB side this is an `int32_t` serialized as (unsigned) hex, which
+ // means negative values will show up as large positive values here. Right
+ // now, we only support MTE (type 1), so we can ignore this complication.
+ uint32_t type = packet.GetHexMaxU32(StdStringExtractor::BigEndian, 0);
+ if (type != 1 /* MTE */)
+ return HandlePacket_ILLFORMED(__FILE__, __LINE__, p,
+ "Invalid/missing type in qMemTags packet, "
+ "only MTE (type 1) is supported");
+ // <EOF>
+ if (packet.GetBytesLeft() != 0)
+ return HandlePacket_ILLFORMED(__FILE__, __LINE__, p,
+ "Invalid qMemTags packet format");
+
+ std::vector<uint8_t> tags;
+ bool ok = DNBProcessGetMemoryTags(pid, addr, length, tags);
+ if (!ok)
+ return SendErrorPacket("E91");
+
+ std::ostringstream ostrm;
+ ostrm << "m"; // Multi part replies
+ for (uint8_t tag : tags) {
+ ostrm << RAWHEX8(tag); // 2 hex chars per tag
+ }
+
+ return SendPacket(ostrm.str());
+}
+
// qGetProfileData;scan_type:0xYYYYYYY
rnb_err_t RNBRemote::HandlePacket_GetProfileData(const char *p) {
nub_process_t pid = m_ctx.ProcessID();
@@ -6162,6 +6247,21 @@ GetCPUTypesFromHost(nub_process_t pid) {
return {cputype, cpusubtype};
}
+static bool ProcessRunningWithMemoryTagging(pid_t pid) {
+#if __has_include(<os/security_config.h>)
+ if (__builtin_available(macOS 26.0, iOS 26.0, tvOS 26.0, watchOS 26.0,
+ visionOS 26.0, driverkit 25.0, *)) {
+ os_security_config_t config;
+ int ret = ::os_security_config_get_for_proc(pid, &config);
+ if (ret != 0)
+ return false;
+
+ return (config & OS_SECURITY_CONFIG_MTE);
+ }
+#endif
+ return false;
+}
+
// Note that all numeric values returned by qProcessInfo are hex encoded,
// including the pid and the cpu type.
@@ -6338,6 +6438,9 @@ rnb_err_t RNBRemote::HandlePacket_qProcessInfo(const char *p) {
rep << "vendor:apple;";
+ if (ProcessRunningWithMemoryTagging(pid))
+ rep << "mte:enabled;";
+
#if defined(__LITTLE_ENDIAN__)
rep << "endian:little;";
#elif defined(__BIG_ENDIAN__)
diff --git a/lldb/tools/debugserver/source/RNBRemote.h b/lldb/tools/debugserver/source/RNBRemote.h
index ad254ae..cf1c978 100644
--- a/lldb/tools/debugserver/source/RNBRemote.h
+++ b/lldb/tools/debugserver/source/RNBRemote.h
@@ -121,6 +121,7 @@ public:
set_list_threads_in_stop_reply, // 'QListThreadsInStopReply:'
sync_thread_state, // 'QSyncThreadState:'
memory_region_info, // 'qMemoryRegionInfo:'
+ get_memory_tags, // 'qMemTags:'
get_profile_data, // 'qGetProfileData'
set_enable_profiling, // 'QSetEnableAsyncProfiling'
enable_compression, // 'QEnableCompression:'
@@ -237,6 +238,7 @@ public:
rnb_err_t HandlePacket_SaveRegisterState(const char *p);
rnb_err_t HandlePacket_RestoreRegisterState(const char *p);
rnb_err_t HandlePacket_MemoryRegionInfo(const char *p);
+ rnb_err_t HandlePacket_qMemTags(const char *p);
rnb_err_t HandlePacket_GetProfileData(const char *p);
rnb_err_t HandlePacket_SetEnableAsyncProfiling(const char *p);
rnb_err_t HandlePacket_QEnableCompression(const char *p);
diff --git a/llvm/.clang-format b/llvm/.clang-format
index 5bead5f..ecb44bf 100644
--- a/llvm/.clang-format
+++ b/llvm/.clang-format
@@ -1,2 +1,2 @@
BasedOnStyle: LLVM
-
+LineEnding: LF
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index b981929..c450ee5 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -1011,6 +1011,9 @@ set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR ${LLVM_ENABLE_PER_TARGET_RUNTIME_DIR_defa
set(LLVM_PROFDATA_FILE "" CACHE FILEPATH
"Profiling data file to use when compiling in order to improve runtime performance.")
+set(LLVM_SPROFDATA_FILE "" CACHE FILEPATH
+ "Sampling profiling data file to use when compiling in order to improve runtime performance.")
+
if(LLVM_INCLUDE_TESTS)
# All LLVM Python files should be compatible down to this minimum version.
set(LLVM_MINIMUM_PYTHON_VERSION 3.8)
diff --git a/llvm/Maintainers.md b/llvm/Maintainers.md
index 5afdd15..e522592 100644
--- a/llvm/Maintainers.md
+++ b/llvm/Maintainers.md
@@ -123,6 +123,13 @@ a.bataev@outlook.com (email), [alexey-bataev](https://github.com/alexey-bataev)
Chandler Carruth \
chandlerc@gmail.com, chandlerc@google.com (email), [chandlerc](https://github.com/chandlerc) (GitHub)
+#### DFAJumpThreading
+
+Hongyu Chen \
+xxs\_chy@outlook.com (email), [XChy](https://github.com/XChy) (Github) \
+Usman Nadeem \
+mnadeem@quicinc.com (email), [UsmanNadeem](https://github.com/UsmanNadeem) (Github)
+
### Instrumentation and sanitizers
#### Sanitizers not covered by someone else
diff --git a/llvm/cmake/modules/HandleLLVMOptions.cmake b/llvm/cmake/modules/HandleLLVMOptions.cmake
index 8eca29f..d4195db6 100644
--- a/llvm/cmake/modules/HandleLLVMOptions.cmake
+++ b/llvm/cmake/modules/HandleLLVMOptions.cmake
@@ -1184,7 +1184,7 @@ if(LLVM_ENABLE_EH AND NOT LLVM_ENABLE_RTTI)
message(FATAL_ERROR "Exception handling requires RTTI. You must set LLVM_ENABLE_RTTI to ON")
endif()
-set(LLVM_BUILD_INSTRUMENTED OFF CACHE STRING "Build LLVM and tools with PGO instrumentation. May be specified as IR or Frontend")
+set(LLVM_BUILD_INSTRUMENTED OFF CACHE STRING "Build LLVM and tools with PGO instrumentation. May be specified as IR, Frontend, CSIR, or CSSPGO")
set(LLVM_VP_COUNTERS_PER_SITE "1.5" CACHE STRING "Value profile counters to use per site for IR PGO with Clang")
mark_as_advanced(LLVM_BUILD_INSTRUMENTED LLVM_VP_COUNTERS_PER_SITE)
string(TOUPPER "${LLVM_BUILD_INSTRUMENTED}" uppercase_LLVM_BUILD_INSTRUMENTED)
@@ -1217,6 +1217,19 @@ if (LLVM_BUILD_INSTRUMENTED)
CMAKE_EXE_LINKER_FLAGS
CMAKE_SHARED_LINKER_FLAGS)
endif()
+ elseif(uppercase_LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO")
+ if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ append("-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -fno-optimize-sibling-calls -fpseudo-probe-for-profiling -fdebug-info-for-profiling"
+ CMAKE_CXX_FLAGS
+ CMAKE_C_FLAGS)
+ if(NOT LINKER_IS_LLD_LINK)
+ append("-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -fno-optimize-sibling-calls -fpseudo-probe-for-profiling -fdebug-info-for-profiling"
+ CMAKE_EXE_LINKER_FLAGS
+ CMAKE_SHARED_LINKER_FLAGS)
+ endif()
+ else()
+ message(FATAL_ERROR "LLVM_BUILD_INSTRUMENTED=CSSPGO can only be specified when compiling with clang")
+ endif()
else()
append("-fprofile-instr-generate=\"${LLVM_PROFILE_FILE_PATTERN}\""
CMAKE_CXX_FLAGS
@@ -1269,6 +1282,21 @@ elseif(LLVM_PROFDATA_FILE)
message(WARNING "LLVM_PROFDATA_FILE specified, but ${LLVM_PROFDATA_FILE} not found")
endif()
+if(LLVM_SPROFDATA_FILE AND EXISTS ${LLVM_SPROFDATA_FILE})
+ if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" )
+ append("-fpseudo-probe-for-profiling -fprofile-sample-use=\"${LLVM_SPROFDATA_FILE}\""
+ CMAKE_CXX_FLAGS
+ CMAKE_C_FLAGS)
+ if(NOT LINKER_IS_LLD_LINK)
+ append("-fpseudo-probe-for-profiling -fprofile-sample-use=\"${LLVM_SPROFDATA_FILE}\""
+ CMAKE_EXE_LINKER_FLAGS
+ CMAKE_SHARED_LINKER_FLAGS)
+ endif()
+ else()
+ message(FATAL_ERROR "LLVM_SPROFDATA_FILE can only be specified when compiling with clang")
+ endif()
+endif()
+
option(LLVM_BUILD_INSTRUMENTED_COVERAGE "Build LLVM and tools with Code Coverage instrumentation" Off)
option(LLVM_INDIVIDUAL_TEST_COVERAGE "Emit individual coverage file for each test case." OFF)
mark_as_advanced(LLVM_BUILD_INSTRUMENTED_COVERAGE)
diff --git a/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst b/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst
new file mode 100644
index 0000000..7259ee87
--- /dev/null
+++ b/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst
@@ -0,0 +1,2002 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+====================================================================================
+Syntax of GFX12 Instructions
+====================================================================================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+This document describes the syntax of GFX12 instructions.
+
+Notation
+========
+
+Notation used in this document is explained :ref:`here<amdgpu_syn_instruction_notation>`.
+
+Overview
+========
+
+An overview of generic syntax and other features of AMDGPU instructions may be found :ref:`in this document<amdgpu_syn_instructions>`.
+
+Instructions
+============
+
+
+SMEM
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **SRC3** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ s_atc_probe :ref:`sdata<amdgpu_synid_gfx12_sdata_d725ab>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_atc_probe_buffer :ref:`sdata<amdgpu_synid_gfx12_sdata_d725ab>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_b128 :ref:`sdata<amdgpu_synid_gfx12_sdata_4585b8>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_b256 :ref:`sdata<amdgpu_synid_gfx12_sdata_0974a4>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_b32 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_b512 :ref:`sdata<amdgpu_synid_gfx12_sdata_6c003b>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_b64 :ref:`sdata<amdgpu_synid_gfx12_sdata_354189>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_b96 :ref:`sdata<amdgpu_synid_gfx12_sdata_dd9dd8>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_i16 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_i8 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_u16 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_load_u8 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_nop :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_buffer_prefetch_data :ref:`sbase<amdgpu_synid_gfx12_sbase_453b95>`, :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_8ec073>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_dcache_inv :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_b128 :ref:`sdata<amdgpu_synid_gfx12_sdata_4585b8>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_b256 :ref:`sdata<amdgpu_synid_gfx12_sdata_0974a4>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_b32 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_b512 :ref:`sdata<amdgpu_synid_gfx12_sdata_6c003b>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_b64 :ref:`sdata<amdgpu_synid_gfx12_sdata_354189>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_b96 :ref:`sdata<amdgpu_synid_gfx12_sdata_dd9dd8>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_i16 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_i8 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_u16 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_load_u8 :ref:`sdata<amdgpu_synid_gfx12_sdata_836716>`, :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_prefetch_data :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_prefetch_data_pc_rel :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_prefetch_inst :ref:`sbase<amdgpu_synid_gfx12_sbase_47adb7>`, :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ s_prefetch_inst_pc_rel :ref:`ioffset<amdgpu_synid_gfx12_ioffset>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_ec005a>`, :ref:`sdata<amdgpu_synid_gfx12_sdata_5c7b50>` :ref:`offset24s<amdgpu_synid_smem_offset24s>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+
+SOP1
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ s_abs_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_alloc_vgpr :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_and_not0_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_and_not0_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_and_not0_wrexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_and_not0_wrexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_and_not1_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_and_not1_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_and_not1_wrexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_and_not1_wrexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_and_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_and_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_barrier_init :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_barrier_join :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_barrier_signal :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_barrier_signal_isfirst :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_bcnt0_i32_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_bcnt0_i32_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_bcnt1_i32_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_bcnt1_i32_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_bitreplicate_b64_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_bitset0_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_bitset0_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_bitset1_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_bitset1_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_brev_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_brev_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_ceil_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_ceil_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cls_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cls_i32_i64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_clz_i32_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_clz_i32_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_cmov_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cmov_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_ctz_i32_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_ctz_i32_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_cvt_f16_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cvt_f32_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cvt_f32_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cvt_f32_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cvt_hi_f32_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cvt_i32_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_cvt_u32_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_floor_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_floor_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_get_barrier_state :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_get_lock_state :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_getpc_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`
+ s_mov_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_mov_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_mov_fed_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_mov_from_global_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_007f9c>`
+ s_mov_from_global_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>`
+ s_mov_regrd_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_mov_to_global_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_mov_to_global_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_movreld_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_movreld_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_movrels_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_007f9c>`
+ s_movrels_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>`
+ s_movrelsd_2_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_007f9c>`
+ s_nand_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_nand_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_nor_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_nor_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_not_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_not_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_or_not0_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_or_not0_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_or_not1_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_or_not1_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_or_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_or_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_quadmask_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_quadmask_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_rfe_b64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>`
+ s_rndne_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_rndne_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_sendmsg_rtn_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_245536>`
+ s_sendmsg_rtn_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_245536>`
+ s_setpc_b64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>`
+ s_sext_i32_i16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_sext_i32_i8 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_sleep_var :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_swap_to_global_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_007f9c>`
+ s_swappc_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_2797bc>`
+ s_trunc_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_trunc_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_try_lock :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_unlock :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_wakeup_barrier :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_1a9ca5>`
+ s_wqm_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_wqm_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_xnor_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_xnor_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+ s_xor_saveexec_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_836716>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`
+ s_xor_saveexec_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`
+
+SOP2
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ s_absdiff_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_add_co_ci_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_add_co_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_add_co_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_add_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_add_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_add_nc_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_and_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_and_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_and_not1_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_and_not1_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_ashr_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_ashr_i64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bfe_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bfe_i64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bfe_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bfe_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bfm_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bfm_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cselect_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cselect_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_cvt_pk_rtz_f16_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_fmaak_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ s_fmac_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_fmac_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_fmamk_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_lshl1_add_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_lshl2_add_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_lshl3_add_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_lshl4_add_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_lshl_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_lshl_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_lshr_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_lshr_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_max_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_max_num_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_max_num_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_max_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_maximum_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_maximum_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_min_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_min_num_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_min_num_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_min_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_minimum_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_minimum_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_mul_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_mul_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_mul_hi_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_mul_hi_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_mul_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_mul_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_nand_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_nand_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_nor_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_nor_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_or_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_or_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_or_not1_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_or_not1_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_pack_hh_b32_b16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_pack_hl_b32_b16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_pack_lh_b32_b16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_pack_ll_b32_b16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_sub_co_ci_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_sub_co_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_sub_co_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_sub_f16 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_sub_f32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_sub_nc_u64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_xnor_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_xnor_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_xor_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_xor_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+
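+A minimal illustrative use of the three-operand scalar forms above (the
+register numbers and the literal value are placeholders, not mandated by
+the syntax):
+
+.. parsed-literal::
+
+    s_lshl_b32 s0, s1, 4                  // ssrc1 may be an inline constant
+    s_fmaak_f32 s2, s3, s4, 0x3f800000    // trailing 32-bit literal operand
+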
+SOPC
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **SRC0** **SRC1**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ s_bitcmp0_b32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bitcmp0_b64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bitcmp1_b32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_bitcmp1_b64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_eq_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_eq_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_eq_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_eq_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_eq_u64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_cmp_ge_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_ge_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_ge_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_ge_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_gt_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_gt_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_gt_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_gt_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_le_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_le_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_le_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_le_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_lg_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_lg_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_lg_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_lg_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_lg_u64 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_bbb4c6>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_bbb4c6>`
+ s_cmp_lt_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_lt_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_lt_i32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_lt_u32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_neq_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_neq_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_nge_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_nge_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_ngt_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_ngt_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_nle_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_nle_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_nlg_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_nlg_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_nlt_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_nlt_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_o_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_o_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_u_f16 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+ s_cmp_u_f32 :ref:`ssrc0<amdgpu_synid_gfx12_ssrc0_c4593f>`, :ref:`ssrc1<amdgpu_synid_gfx12_ssrc1_c4593f>`
+
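+SOPC compares write their result to SCC; a minimal illustrative pairing
+with a conditional branch (registers and the label are placeholders):
+
+.. parsed-literal::
+
+    s_cmp_lt_i32 s0, s1          // result written to SCC
+    s_cbranch_scc1 .LBB0_1       // SOPP branch consuming SCC
+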
+SOPK
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ s_addk_co_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_call_b64 :ref:`sdst<amdgpu_synid_gfx12_sdst_354189>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cmovk_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_eq_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_eq_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_ge_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_ge_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_gt_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_gt_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_le_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_le_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_lg_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_lg_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_lt_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_cmpk_lt_u32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_getreg_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_7ed651>`
+ s_getreg_regrd_b32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_7ed651>`
+ s_movk_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_mulk_i32 :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_setreg_b32 :ref:`simm16<amdgpu_synid_gfx12_simm16_cc1716>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_20064d>`
+ s_setreg_imm32_b32 :ref:`simm16<amdgpu_synid_gfx12_simm16_cc1716>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ s_subvector_loop_begin :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_subvector_loop_end :ref:`sdst<amdgpu_synid_gfx12_sdst_ced58d>`, :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_version :ref:`simm16<amdgpu_synid_gfx12_simm16_15ccdd>`
+
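+SOPK encodes a 16-bit immediate directly in the instruction; for example
+(register and immediate values are placeholders):
+
+.. parsed-literal::
+
+    s_movk_i32 s0, 0x1234        // simm16 sign-extended into s0
+    s_cmpk_eq_i32 s0, 0x42       // compare s0 with simm16, result to SCC
+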
+SOPP
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **SRC**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ s_barrier
+ s_barrier_leave
+ s_barrier_wait :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_branch :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_cdbgsys :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_cdbgsys_and_user :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_cdbgsys_or_user :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_cdbguser :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_execnz :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_execz :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_scc0 :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_scc1 :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_vccnz :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_cbranch_vccz :ref:`simm16<amdgpu_synid_gfx12_simm16_3d2a4f>`
+ s_clause :ref:`simm16<amdgpu_synid_gfx12_simm16_730a13>`
+ s_code_end
+ s_decperflevel :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_delay_alu :ref:`simm16<amdgpu_synid_gfx12_simm16_c98889>`
+ s_denorm_mode :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_endpgm
+ s_endpgm_ordered_ps_done
+ s_endpgm_saved
+ s_icache_inv
+ s_incperflevel :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_nop :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_round_mode :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_sendmsg :ref:`simm16<amdgpu_synid_gfx12_simm16_ee8b30>`
+ s_sendmsghalt :ref:`simm16<amdgpu_synid_gfx12_simm16_ee8b30>`
+ s_set_inst_prefetch_distance :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_sethalt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_setkill :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_setprio :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_singleuse_vdst :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>`
+ s_sleep :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>`
+ s_trap :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_ttracedata
+ s_ttracedata_imm :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_wait_alu :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>`
+ s_wait_bvhcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_wait_dscnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_wait_event :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>`
+ s_wait_expcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_wait_idle
+ s_wait_kmcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_wait_loadcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_wait_loadcnt_dscnt :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>`
+ s_wait_samplecnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_wait_storecnt :ref:`simm16<amdgpu_synid_gfx12_simm16_39b593>`
+ s_wait_storecnt_dscnt :ref:`simm16<amdgpu_synid_gfx12_simm16_81e671>`
+ s_waitcnt :ref:`simm16<amdgpu_synid_gfx12_simm16_218bea>`
+ s_wakeup
+
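+Most SOPP instructions take only a simm16, or no operand at all; an
+illustrative shader epilogue (the counter value is a placeholder):
+
+.. parsed-literal::
+
+    s_wait_loadcnt 0x0           // wait for outstanding loads to complete
+    s_endpgm                     // end the program
+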
+VBUFFER
+-------
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ buffer_atomic_add_f32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_add_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_add_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_and_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_and_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_cmpswap_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_cmpswap_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_cond_sub_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_dec_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_dec_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_inc_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_inc_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_max_i32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_max_i64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_max_num_f32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_max_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_max_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_min_i32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_min_i64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_min_num_f32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_min_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_min_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_or_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_or_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_pk_add_bf16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_pk_add_f16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_sub_clamp_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_sub_u32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_sub_u64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_swap_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_swap_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_xor_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_atomic_xor_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_gl0_inv :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_gl1_inv :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_b128 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_b96 :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_block :ref:`vdata<amdgpu_synid_gfx12_vdata_2eda77>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_b16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_hi_b16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_hi_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_hi_i8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_hi_u8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_i8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_d16_u8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_i16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_i8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_lds_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_lds_format_x :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_lds_i16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_lds_i8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_lds_u16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_lds_u8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_u16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_load_u8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_nop :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_b128 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_b16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_b32 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_b64 :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_b8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_b96 :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_block :ref:`vdata<amdgpu_synid_gfx12_vdata_2eda77>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_d16_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_d16_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_d16_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_d16_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_d16_hi_b16 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_d16_hi_b8 :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_d16_hi_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ buffer_store_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_load_d16_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_load_d16_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_load_d16_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_load_d16_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_load_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_load_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_load_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_load_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_store_d16_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_store_d16_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_store_d16_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_store_d16_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_store_format_x :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_store_format_xy :ref:`vdata<amdgpu_synid_gfx12_vdata_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_store_format_xyz :ref:`vdata<amdgpu_synid_gfx12_vdata_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ tbuffer_store_format_xyzw :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>`, :ref:`soffset<amdgpu_synid_gfx12_soffset_c5b88c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`idxen<amdgpu_synid_idxen>` :ref:`offen<amdgpu_synid_offen>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+
+VDS
+---
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ ds_add_f32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_add_f64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_add_rtn_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_add_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_add_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_add_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_add_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_and_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_and_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_and_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_and_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_append :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bpermute_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bpermute_fi_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bpermute_fi_from_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bpermute_fi_to_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bpermute_fi_to_simd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bpermute_from_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bpermute_to_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bpermute_to_simd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bvh_stack_push4_pop1_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_e016a1>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bvh_stack_push8_pop1_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_731030>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_bvh_stack_push8_pop2_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_731030>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_cmpstore_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_cmpstore_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_cmpstore_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_cmpstore_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_cond_sub_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_cond_sub_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_condxchg32_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_consume :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_dec_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_dec_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_dec_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_dec_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_inc_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_inc_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_inc_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_inc_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_2addr_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_2addr_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_2addr_stride64_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_2addr_stride64_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_addtid_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_b96 :ref:`vdst<amdgpu_synid_gfx12_vdst_48e42f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_i8_d16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_i8_d16_hi :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_u16_d16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_u16_d16_hi :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_u8_d16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_load_u8_d16_hi :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_i32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_i64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_num_f32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_num_f64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_num_rtn_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_num_rtn_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_rtn_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_rtn_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_max_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_i32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_i64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_num_f32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_num_f64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_num_rtn_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_num_rtn_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_rtn_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_rtn_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_min_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_mskor_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_mskor_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_mskor_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_mskor_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_nop :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_or_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_or_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_or_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_or_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_permute_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_permute_from_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_permute_to_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_permute_to_simd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_pk_add_bf16 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_pk_add_f16 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_pk_add_rtn_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_pk_add_rtn_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_rsub_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_rsub_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_rsub_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_rsub_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_2addr_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_2addr_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_2addr_stride64_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_2addr_stride64_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_addtid_b32 :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_b128 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_e016a1>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_b16 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_b16_d16_hi :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_b8 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_b8_d16_hi :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_store_b96 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_56f215>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_storexchg_2addr_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_storexchg_2addr_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_storexchg_2addr_stride64_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_storexchg_2addr_stride64_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>`, :ref:`data1<amdgpu_synid_gfx12_data1_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_storexchg_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_storexchg_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_sub_clamp_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_sub_clamp_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_sub_rtn_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_sub_rtn_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_sub_u32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_sub_u64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_swizzle_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_wrap_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>`, :ref:`data1<amdgpu_synid_gfx12_data1_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_xor_b32 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_xor_b64 :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_xor_rtn_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
+ ds_xor_rtn_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`addr<amdgpu_synid_gfx12_addr>`, :ref:`data0<amdgpu_synid_gfx12_data0_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`offset0<amdgpu_synid_ds_offset80>` :ref:`offset1<amdgpu_synid_ds_offset81>`
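+
+For example, a returning LDS atomic add that reads a 32-bit value at
+*addr*, adds *data0*, and places the previous value in *vdst* could be
+written as follows (a minimal sketch; the register choices and offset are
+illustrative placeholders, not taken from this table)::
+
+    ds_add_rtn_u32 v2, v0, v1 offset:16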
+
+VDSDIR
+------
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ ds_direct_load :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>` :ref:`wait_va_vdst<amdgpu_synid_wait_va_vdst>` :ref:`wait_vdst<amdgpu_synid_wait_vdst>` :ref:`wait_vm_vsrc<amdgpu_synid_wait_vm_vsrc>`
+ ds_param_load :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`attr<amdgpu_synid_gfx12_attr>` :ref:`wait_va_vdst<amdgpu_synid_wait_va_vdst>` :ref:`wait_vdst<amdgpu_synid_wait_vdst>` :ref:`wait_vm_vsrc<amdgpu_synid_wait_vm_vsrc>`
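+
+A usage sketch of a parameter load (the destination register, attribute
+channel, and wait value are illustrative placeholders, and the wait
+modifiers are assumed to be optional)::
+
+    ds_param_load v1, attr0.x wait_va_vdst:0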
+
+VERIF
+-----
+
+.. parsed-literal::
+
+ **INSTRUCTION**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ fake_s_delay_alu
+ fake_s_nop
+ fake_s_wait_alu
+ fake_s_wait_bvhcnt
+ fake_s_wait_dscnt
+ fake_s_wait_expcnt
+ fake_s_wait_kmcnt
+ fake_s_wait_loadcnt
+ fake_s_wait_samplecnt
+ fake_s_wait_storecnt
+ fake_s_waitcnt
+ fake_v_nop
+ ill_0
+ ill_1
+ ill_beef
+ metadata
+ verif_s_adjdelay_alu
+
+VEXPORT
+-------
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **SRC3** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ export :ref:`tgt<amdgpu_synid_gfx12_tgt>`, :ref:`vsrc0<amdgpu_synid_gfx12_vsrc0>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`, :ref:`vsrc2<amdgpu_synid_gfx12_vsrc2>`, :ref:`vsrc3<amdgpu_synid_gfx12_vsrc3>` :ref:`done<amdgpu_synid_done>` :ref:`row_en<amdgpu_synid_row_en>`
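+
+For instance, exporting four 32-bit results to render target 0 and marking
+the export as the final one might look like this (the registers and the
+mrt0 target are illustrative placeholders)::
+
+    export mrt0 v0, v1, v2, v3 done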
+
+VFLAT
+-----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ flat_atomic_add_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_add_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_add_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_and_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_and_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_cmpswap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_cmpswap_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_cond_sub_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_dec_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_dec_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_inc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_inc_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_max_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_max_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_max_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_max_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_max_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_min_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_min_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_min_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_min_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_min_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_or_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_pk_add_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_pk_add_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_sub_clamp_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_sub_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_sub_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_swap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_swap_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_xor_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_atomic_xor_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_b96 :ref:`vdst<amdgpu_synid_gfx12_vdst_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_d16_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_d16_hi_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_d16_hi_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_d16_hi_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_d16_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_d16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_load_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_store_b128 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_store_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_store_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_store_b64 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_store_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_store_b96 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_56f215>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_store_d16_hi_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ flat_store_d16_hi_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
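+
+A minimal sketch of a flat load/store pair (the 64-bit address held in
+v[0:1], the data register, and the offset are illustrative placeholders,
+not taken from this table)::
+
+    flat_load_b32 v2, v[0:1] offset:8
+    flat_store_b32 v[0:1], v2 offset:8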
+
+VGLOBAL
+-------
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ global_atomic_add_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_add_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_add_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_and_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_and_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_cmpswap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_cmpswap_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_cond_sub_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_dec_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_dec_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_inc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_inc_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_max_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_max_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_max_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_max_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_max_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_min_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_min_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_min_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_min_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_min_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_or_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_ordered_add_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_pk_add_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_pk_add_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_sub_clamp_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_sub_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_sub_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_swap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_swap_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_xor_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_atomic_xor_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_inv :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_addtid_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_b96 :ref:`vdst<amdgpu_synid_gfx12_vdst_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_block :ref:`vdst<amdgpu_synid_gfx12_vdst_2eda77>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_d16_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_d16_hi_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_d16_hi_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_d16_hi_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_d16_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_d16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_lds_addtid_b32 :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_lds_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_lds_i16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_lds_i8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_lds_u16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_lds_u8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_tr_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_tr_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_load_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_addtid_b32 :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_b128 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_b64 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_b96 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_56f215>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_block :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_89fd7b>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_d16_hi_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_store_d16_hi_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_f2b449>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_cdc95c>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_wb :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ global_wbinv :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+
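+A usage sketch (editorial; register choices and the modifier value
+``th:TH_ATOMIC_RETURN`` are illustrative assumptions, with ``off``
+standing for a null ``saddr``):
+
+.. parsed-literal::
+
+    // illustrative VGLOBAL examples; operand choices are assumptions
+    global_load_b32 v0, v[2:3], off offset:16
+    global_store_b64 v1, v[4:5], s[2:3] scope:SCOPE_SYS
+    global_atomic_add_u32 v0, v1, v2, s[2:3] th:TH_ATOMIC_RETURN
+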
+VIMAGE
+------
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ image_atomic_add_flt :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_add_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_and :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_cmpswap :ref:`vdata<amdgpu_synid_gfx12_vdata_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_dec_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_inc_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_max_flt :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_max_int :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_max_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_min_flt :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_min_int :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_min_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_or :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_pk_add_bf16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_pk_add_f16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_sub_uint :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_swap :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_atomic_xor :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_bvh64_intersect_ray :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c12f43>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_bvh8_intersect_ray :ref:`vdata<amdgpu_synid_gfx12_vdata_aac3e8>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_a972b9>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_bvh_dual_intersect_ray :ref:`vdata<amdgpu_synid_gfx12_vdata_aac3e8>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c12f43>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_bvh_intersect_ray :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_a972b9>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_5fe6d8>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_get_resinfo :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_load :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_load_mip :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_load_mip_pck :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_load_mip_pck_sgn :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_load_pck :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_load_pck_sgn :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_rsvd_atomic_umax_8 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_rsvd_atomic_umin_8 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_store :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_store_mip :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_store_mip_pck :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_store_pck :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+
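+A usage sketch (editorial; the bracketed address list, the registers,
+and the ``dim:SQ_RSRC_IMG_2D`` value are illustrative assumptions):
+
+.. parsed-literal::
+
+    // illustrative VIMAGE example; operand choices are assumptions
+    image_load v[0:3], [v4, v5], s[8:15] dmask:0xf dim:SQ_RSRC_IMG_2D
+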
+VINTERP
+-------
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_interp_p10_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>`
+ v_interp_p10_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>`
+ v_interp_p10_rtz_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>`
+ v_interp_p2_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>`
+ v_interp_p2_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>`
+ v_interp_p2_rtz_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>` :ref:`clamp<amdgpu_synid_clamp>` :ref:`wait_exp<amdgpu_synid_wait_exp>`
+
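+A usage sketch (editorial; register choices and the ``wait_exp`` count
+are illustrative assumptions):
+
+.. parsed-literal::
+
+    // illustrative VINTERP examples; operand choices are assumptions
+    v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1
+    v_interp_p2_f32 v0, v1, v2, v3 clamp
+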
+VOP1
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_bfrev_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ceil_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ceil_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ceil_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cls_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_clz_i32_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cos_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cos_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ctz_i32_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f16_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f16_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_ubyte0 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_ubyte1 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_ubyte2 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f32_ubyte3 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f64_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f64_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_f64_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_floor_i32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_i16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_i32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_i32_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_i32_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_nearest_i32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_norm_i16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_norm_u16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_off_f32_i4 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_f32_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_f32_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_u16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_u32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_u32_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_u32_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_exp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_exp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_floor_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_floor_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_floor_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fract_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fract_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fract_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_frexp_exp_i16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_frexp_exp_i32_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_frexp_exp_i32_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_frexp_mant_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_frexp_mant_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_frexp_mant_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_log_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_log_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mov_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mov_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mov_fed_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mov_from_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mov_to_global_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_movreld_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_movrels_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_movrelsd_2_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_movrelsd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_nop :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_not_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_not_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_permlane64_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_pipeflush :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rcp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rcp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rcp_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rcp_iflag_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_readfirstlane_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rndne_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rndne_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rndne_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rsq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rsq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_rsq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sat_pk_u8_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sin_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sin_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sqrt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sqrt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sqrt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_swap_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_swap_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_swaprel_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_trunc_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_trunc_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_trunc_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_writelane_regwr_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+
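+For illustration only (registers chosen arbitrarily; this example is not part
+of the generated listing): a VOP1 entry such as v_sqrt_f32 above maps its
+columns onto an assembly line as
+
+.. parsed-literal::
+
+    v_sqrt_f32 v1, v0    // vdst = v1, src0 = v0
+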
+VOP2
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_add_co_ci_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_nc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_nc_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_and_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ashrrev_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cndmask_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_rtz_f16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmaak_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmaak_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmaak_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_1f74c7>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmac_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmac_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmac_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmamk_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmamk_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fmamk_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`literal<amdgpu_synid_gfx12_literal_1f74c7>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_illegal :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ldexp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshlrev_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshlrev_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshrrev_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max_num_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min_num_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_dx9_zero_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_hi_i32_i24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_hi_u32_u24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_i32_i24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_u32_u24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_pk_fmac_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_co_ci_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_nc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_nc_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_subrev_co_ci_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_subrev_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_subrev_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_subrev_nc_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_xnor_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_xor_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+
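+For illustration only (again with arbitrary registers, outside the generated
+listing): a VOP2 entry such as v_add_f32 above reads two sources and writes
+one destination, e.g.
+
+.. parsed-literal::
+
+    v_add_f32 v2, v0, v1    // vdst = v2, src0 = v0, vsrc1 = v1
+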
+VOP3
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_add3_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_co_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_lshl_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_nc_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_nc_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_add_nc_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_alignbit_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_alignbyte_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_and_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_and_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ashrrev_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ashrrev_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_bcnt_u32_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_bfe_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_bfe_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_bfi_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_bfm_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cndmask_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_2797bc>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cubeid_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cubema_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cubesc_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cubetc_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_bf8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_fp8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_i16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_i16_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_norm_i16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_norm_i16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_norm_u16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_norm_u16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_u16_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_u16_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_pk_u8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_sr_bf8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cvt_sr_fp8_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_div_fixup_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_div_fixup_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_div_fixup_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_div_fmas_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_div_fmas_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_div_scale_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_div_scale_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_dot2_bf16_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_dot2_f16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fma_dx9_zero_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fma_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fma_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_fma_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ldexp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_ldexp_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lerp_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshl_add_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshl_add_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshl_or_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshlrev_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshrrev_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_lshrrev_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mad_co_i64_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mad_co_u64_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mad_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mad_i32_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mad_i32_i24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mad_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mad_u32_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mad_u32_u24 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max3_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max3_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max3_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max3_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max3_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max3_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_max_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maximum3_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maximum3_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maximum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maximum_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maximum_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maximumminimum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maximumminimum_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maxmin_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maxmin_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maxmin_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_maxmin_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mbcnt_hi_u32_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mbcnt_lo_u32_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_med3_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_med3_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_med3_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_med3_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_med3_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_med3_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min3_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min3_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min3_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min3_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min3_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min3_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_min_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minimum3_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minimum3_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minimum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minimum_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minimum_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minimummaximum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minimummaximum_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minmax_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minmax_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minmax_num_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_minmax_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mqsad_pk_u16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mqsad_u32_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_e016a1>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_msad_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_hi_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_hi_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_lo_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mul_lo_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_mullit_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_or3_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_or_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_pack_b32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_perm_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_permlane16_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_permlane16_var_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_permlanex16_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_permlanex16_var_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_qsad_pk_u16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_readlane_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_977794>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_readlane_regrd_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_977794>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_exp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_exp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_log_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_log_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_rcp_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_rcp_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_rsq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_rsq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_sqrt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_85aab6>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_s_sqrt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_836716>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sad_hi_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sad_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sad_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sad_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_co_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_nc_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_nc_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_sub_nc_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_subrev_co_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`sdst<amdgpu_synid_gfx12_sdst_e701cc>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_trig_preop_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_writelane_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_c4593f>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_977794>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_xad_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_xor3_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_xor_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+
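+A minimal illustrative sketch of the VOP3 syntax listed above (register
+numbers and modifier values are arbitrary examples, not taken from the
+table; ``m`` stands for the optional source modifiers such as ``-v`` and
+``|v|``):
+
+.. parsed-literal::
+
+    v_minmax_num_f32 v0, -v1, \|v2\|, v3 mul:2 clamp
+    v_or3_b32 v4, v5, v6, v7
+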
+VOP3P
+-----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_dot2_f32_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot2_f32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot4_f32_bf8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot4_f32_bf8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot4_f32_fp8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot4_f32_fp8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot4_i32_iu8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot4_u32_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot8_i32_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_dot8_u32_u4 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_fma_mix_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_fma_mixhi_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_fma_mixlo_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_pk_add_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_add_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_add_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_add_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_ashrrev_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_fma_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_pk_fma_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`, :ref:`src1<amdgpu_synid_gfx12_src1_5cae62>`, :ref:`src2<amdgpu_synid_gfx12_src2_5cae62>`
+ v_pk_lshlrev_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_lshrrev_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_mad_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_pk_mad_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`, :ref:`src2<amdgpu_synid_gfx12_src2_5727cf>`
+ v_pk_max_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_max_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_max_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_maximum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_min_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_min_num_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_min_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_minimum_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_mul_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_mul_lo_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_sub_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_pk_sub_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`, :ref:`src1<amdgpu_synid_gfx12_src1_5727cf>`
+ v_swmmac_bf16_16x16x32_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_731030>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_f16_16x16x32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_731030>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_f32_16x16x32_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_731030>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_f32_16x16x32_bf8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_f32_16x16x32_bf8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_f32_16x16x32_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_731030>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_f32_16x16x32_fp8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_f32_16x16x32_fp8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_i32_16x16x32_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_i32_16x16x32_iu8 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_swmmac_i32_16x16x64_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_47d3bc>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_6802ce>`
+ v_wmma_bf16_16x16x16_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_7b936a>`
+ v_wmma_f16_16x16x16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_7b936a>`
+ v_wmma_f32_16x16x16_bf16 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+ v_wmma_f32_16x16x16_bf8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+ v_wmma_f32_16x16x16_bf8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+ v_wmma_f32_16x16x16_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_e016a1>`, :ref:`src1<amdgpu_synid_gfx12_src1_e016a1>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+ v_wmma_f32_16x16x16_fp8_bf8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+ v_wmma_f32_16x16x16_fp8_fp8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+ v_wmma_i32_16x16x16_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_6802ce>`, :ref:`src1<amdgpu_synid_gfx12_src1_6802ce>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+ v_wmma_i32_16x16x16_iu8 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+ v_wmma_i32_16x16x32_iu4 :ref:`vdst<amdgpu_synid_gfx12_vdst_227281>`, :ref:`src0<amdgpu_synid_gfx12_src0_fd235e>`, :ref:`src1<amdgpu_synid_gfx12_src1_fd235e>`, :ref:`src2<amdgpu_synid_gfx12_src2_96fbd3>`
+
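+A minimal illustrative sketch of the VOP3P syntax listed above (register
+numbers are arbitrary; packed-operand modifiers such as ``op_sel`` are
+omitted for brevity):
+
+.. parsed-literal::
+
+    v_pk_add_f16 v0, v1, v2
+    v_pk_fma_f16 v3, v4, v5, v6
+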
+VOPC
+----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_cmp_class_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_class_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_class_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_eq_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_f_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_f_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_f_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_f_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_f_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_f_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_f_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ge_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_gt_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_le_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lg_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lg_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lg_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_lt_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ne_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ne_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ne_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ne_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ne_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ne_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_neq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_neq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_neq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nge_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nge_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nge_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ngt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ngt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_ngt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nle_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nle_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nle_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nlg_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nlg_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nlg_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nlt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nlt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_nlt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_o_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_o_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_o_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_t_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_t_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_t_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_t_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_t_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_t_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_t_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_u_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_u_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmp_u_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_006c40>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_class_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_class_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_class_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_eq_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_f_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_f_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_f_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_f_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_f_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_f_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_f_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ge_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_gt_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_le_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lg_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lg_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lg_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_lt_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ne_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ne_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ne_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ne_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ne_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ne_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_neq_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_neq_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_neq_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nge_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nge_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nge_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ngt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ngt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_ngt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nle_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nle_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nle_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nlg_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nlg_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nlg_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nlt_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nlt_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_nlt_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_o_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_o_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_o_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_t_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_t_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_t_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_t_i32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_t_i64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_t_u32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_t_u64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_u_f16 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_u_f32 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5727cf>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_6802ce>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+ v_cmpx_u_f64 :ref:`vdst<amdgpu_synid_gfx12_vdst_7de8e7>`, :ref:`src0<amdgpu_synid_gfx12_src0_5cae62>`::ref:`m<amdgpu_synid_gfx12_m>`, :ref:`vsrc1<amdgpu_synid_gfx12_vsrc1_fd235e>`::ref:`m<amdgpu_synid_gfx12_m>` :ref:`omod<amdgpu_synid_omod>` :ref:`clamp<amdgpu_synid_clamp>`
+
+VOPD
+----
+
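+As a minimal illustration, assuming the ``::`` pairing syntax that the LLVM
+assembler accepts for VOPD dual-issue instructions (the concrete registers
+below are placeholders, not taken from the tables): a combined row such as
+``v_dual_add_f32_x_mul_f32`` corresponds to a single assembly statement whose
+X and Y halves are joined by ``::``, e.g.::
+
+    v_dual_add_f32 v0, v1, v2 :: v_dual_mul_f32 v3, v4, v5
+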
+.. parsed-literal::
+
+ **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **SRC3** **SRC4** **SRC5**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_dual_add_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_add_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_add_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_add_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_add_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_add_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_cndmask_b32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_cndmask_b32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_cndmask_b32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_cndmask_b32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_dot2acc_f32_bf16_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_dot2acc_f32_bf16_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_dot2acc_f32_bf16_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_dot2acc_f32_bf16_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_dot2acc_f32_bf16_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_bf16_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_dot2acc_f32_f16_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_dot2acc_f32_f16_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_dot2acc_f32_f16_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_dot2acc_f32_f16_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_dot2acc_f32_f16_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmaak_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmaak_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmac_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_fmac_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmac_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmac_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_fmac_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmac_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_fmamk_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmamk_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_max_num_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_max_num_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_max_num_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_max_num_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_max_num_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_max_num_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_min_num_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_min_num_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_min_num_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_min_num_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_min_num_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_mov_b32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_mov_b32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_mov_b32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_mov_b32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mov_b32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_mul_dx9_zero_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_mul_dx9_zero_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_mul_dx9_zero_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_mul_dx9_zero_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_dx9_zero_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_mul_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_mul_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_mul_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_mul_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_mul_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_sub_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_sub_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_sub_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_sub_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_sub_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_add_nc_u32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_and_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_subrev_f32_x_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_subrev_f32_x_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_subrev_f32_x_lshlrev_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`
+ v_dual_subrev_f32_x_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_subrev_f32_x_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+
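+The table above enumerates each legal X/Y opcode pairing as one combined
+name. In assembly source the pair is written as two mnemonics joined by
+``::``. A minimal hand-written sketch (register numbers are illustrative
+assumptions; note that VOPD also constrains register banks, and the two
+destinations below deliberately use opposite even/odd parity):
+
+.. parsed-literal::
+
+    v_dual_mul_f32 v0, v1, v2 :: v_dual_add_f32 v3, v4, v5    // the pairing listed as v_dual_mul_f32_x_add_f32
+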
+VOPDX
+-----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_dual_add_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_cndmask_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`vcc<amdgpu_synid_gfx12_vcc>`
+ v_dual_dot2acc_f32_bf16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_dot2acc_f32_f16 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_fmaak_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`
+ v_dual_fmac_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_fmamk_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`literal<amdgpu_synid_gfx12_literal_81e671>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_max_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_min_num_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_mov_b32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`
+ v_dual_mul_dx9_zero_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_mul_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_sub_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+ v_dual_subrev_f32 :ref:`vdstx<amdgpu_synid_gfx12_vdstx>`, :ref:`srcx0<amdgpu_synid_gfx12_srcx0>`, :ref:`vsrcx1<amdgpu_synid_gfx12_vsrcx1>`
+
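+VOPDX opcodes occupy the first (X) slot of a dual-issue pair and appear to
+the left of the ``::`` separator. A short sketch pairing an X opcode from
+this table with a Y opcode (registers chosen arbitrarily, again keeping the
+two destinations at opposite even/odd parity):
+
+.. parsed-literal::
+
+    v_dual_max_num_f32 v0, v2, v3 :: v_dual_add_nc_u32 v1, v4, v5    // X half writes v0, Y half writes v1
+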
+VOPDY
+-----
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ v_dual_add_nc_u32 :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_and_b32 :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+ v_dual_lshlrev_b32 :ref:`vdsty<amdgpu_synid_gfx12_vdsty>`, :ref:`srcy0<amdgpu_synid_gfx12_srcy0>`, :ref:`vsrcy1<amdgpu_synid_gfx12_vsrcy1>`
+
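+VOPDY opcodes fill the second (Y) slot, to the right of ``::``. One more
+illustrative pairing, with the X half taken from the VOPDX table above and
+arbitrary register numbers:
+
+.. parsed-literal::
+
+    v_dual_mov_b32 v2, v4 :: v_dual_lshlrev_b32 v5, v6, v7    // Y half shifts v7 left by v6
+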
+VSAMPLE
+-------
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ image_gather4 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_b :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_b_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_c :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_c_b :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_c_b_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_c_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_c_l :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_c_lz :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_c_lz_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_l :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_lz :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_lz_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_gather4h :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_get_lod :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_msaa_load :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_d82160>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_b :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_b_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_b_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_b_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_b :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_b_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_b_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_b_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_d :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_d_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_d_cl_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_d_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_d_cl_o_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_d_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_d_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_d_o_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_l :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_l_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_lz :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_lz_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_c_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_d :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_d_cl :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_d_cl_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_d_cl_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_d_cl_o_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_d_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_d_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_d_o_g16 :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_l :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_l_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_lz :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_lz_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ image_sample_o :ref:`vdata<amdgpu_synid_gfx12_vdata_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`rsrc<amdgpu_synid_gfx12_rsrc_c9f929>`, :ref:`samp<amdgpu_synid_gfx12_samp>` :ref:`dmask<amdgpu_synid_dmask>` :ref:`tfe<amdgpu_synid_tfe>` :ref:`unorm<amdgpu_synid_unorm>` :ref:`lwe<amdgpu_synid_lwe>` :ref:`dim<amdgpu_synid_dim>` :ref:`r128<amdgpu_synid_r128>` :ref:`a16<amdgpu_synid_a16>` :ref:`d16<amdgpu_synid_d16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+
+VSCRATCH
+--------
+
+.. parsed-literal::
+
+ **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS**
+ \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|
+ scratch_load_b128 :ref:`vdst<amdgpu_synid_gfx12_vdst_69a144>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_b32 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_b64 :ref:`vdst<amdgpu_synid_gfx12_vdst_bdb32f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_b96 :ref:`vdst<amdgpu_synid_gfx12_vdst_48e42f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_block :ref:`vdst<amdgpu_synid_gfx12_vdst_2eda77>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_d16_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_d16_hi_b16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_d16_hi_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_d16_hi_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_d16_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_d16_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_i16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_i8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_lds_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_lds_i16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_lds_i8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_lds_u16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_lds_u8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_u16 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_load_u8 :ref:`vdst<amdgpu_synid_gfx12_vdst_89680f>`, :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_b128 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_e016a1>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_b32 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_b64 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_fd235e>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_b96 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_56f215>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_block :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_89fd7b>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_d16_hi_b16 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+ scratch_store_d16_hi_b8 :ref:`vaddr<amdgpu_synid_gfx12_vaddr_c8b8d4>`, :ref:`vsrc<amdgpu_synid_gfx12_vsrc_6802ce>`, :ref:`saddr<amdgpu_synid_gfx12_saddr_d42b64>` :ref:`offset<amdgpu_synid_ds_offset16>` :ref:`th<amdgpu_synid_th>` :ref:`scope<amdgpu_synid_scope>` :ref:`nv<amdgpu_synid_nv>`
+
+.. |---| unicode:: U+02014 .. em dash
+
+.. toctree::
+ :hidden:
+
+ gfx12_addr
+ gfx12_attr
+ gfx12_data0_56f215
+ gfx12_data0_6802ce
+ gfx12_data0_e016a1
+ gfx12_data0_fd235e
+ gfx12_data1_6802ce
+ gfx12_data1_731030
+ gfx12_data1_e016a1
+ gfx12_data1_fd235e
+ gfx12_ioffset
+ gfx12_literal_1f74c7
+ gfx12_literal_81e671
+ gfx12_m
+ gfx12_rsrc_5fe6d8
+ gfx12_rsrc_c9f929
+ gfx12_saddr_cdc95c
+ gfx12_saddr_d42b64
+ gfx12_samp
+ gfx12_sbase_453b95
+ gfx12_sbase_47adb7
+ gfx12_sdata_0974a4
+ gfx12_sdata_354189
+ gfx12_sdata_4585b8
+ gfx12_sdata_5c7b50
+ gfx12_sdata_6c003b
+ gfx12_sdata_836716
+ gfx12_sdata_d725ab
+ gfx12_sdata_dd9dd8
+ gfx12_sdst_006c40
+ gfx12_sdst_20064d
+ gfx12_sdst_354189
+ gfx12_sdst_836716
+ gfx12_sdst_ced58d
+ gfx12_sdst_e701cc
+ gfx12_simm16_15ccdd
+ gfx12_simm16_218bea
+ gfx12_simm16_39b593
+ gfx12_simm16_3d2a4f
+ gfx12_simm16_730a13
+ gfx12_simm16_7ed651
+ gfx12_simm16_81e671
+ gfx12_simm16_c98889
+ gfx12_simm16_cc1716
+ gfx12_simm16_ee8b30
+ gfx12_soffset_8ec073
+ gfx12_soffset_c5b88c
+ gfx12_soffset_ec005a
+ gfx12_src0_5727cf
+ gfx12_src0_5cae62
+ gfx12_src0_6802ce
+ gfx12_src0_85aab6
+ gfx12_src0_c4593f
+ gfx12_src0_e016a1
+ gfx12_src0_fd235e
+ gfx12_src1_5727cf
+ gfx12_src1_5cae62
+ gfx12_src1_6802ce
+ gfx12_src1_731030
+ gfx12_src1_977794
+ gfx12_src1_c4593f
+ gfx12_src1_e016a1
+ gfx12_src1_fd235e
+ gfx12_src2_2797bc
+ gfx12_src2_5727cf
+ gfx12_src2_5cae62
+ gfx12_src2_6802ce
+ gfx12_src2_7b936a
+ gfx12_src2_96fbd3
+ gfx12_src2_c4593f
+ gfx12_src2_e016a1
+ gfx12_srcx0
+ gfx12_srcy0
+ gfx12_ssrc0_007f9c
+ gfx12_ssrc0_1a9ca5
+ gfx12_ssrc0_245536
+ gfx12_ssrc0_2797bc
+ gfx12_ssrc0_bbb4c6
+ gfx12_ssrc0_c4593f
+ gfx12_ssrc1_bbb4c6
+ gfx12_ssrc1_c4593f
+ gfx12_tgt
+ gfx12_vaddr_a972b9
+ gfx12_vaddr_c12f43
+ gfx12_vaddr_c8b8d4
+ gfx12_vaddr_d82160
+ gfx12_vaddr_f2b449
+ gfx12_vcc
+ gfx12_vdata_2eda77
+ gfx12_vdata_48e42f
+ gfx12_vdata_69a144
+ gfx12_vdata_89680f
+ gfx12_vdata_aac3e8
+ gfx12_vdata_bdb32f
+ gfx12_vdst_006c40
+ gfx12_vdst_227281
+ gfx12_vdst_2eda77
+ gfx12_vdst_47d3bc
+ gfx12_vdst_48e42f
+ gfx12_vdst_69a144
+ gfx12_vdst_7de8e7
+ gfx12_vdst_836716
+ gfx12_vdst_89680f
+ gfx12_vdst_bdb32f
+ gfx12_vdstx
+ gfx12_vdsty
+ gfx12_vsrc0
+ gfx12_vsrc1_6802ce
+ gfx12_vsrc1_fd235e
+ gfx12_vsrc2
+ gfx12_vsrc3
+ gfx12_vsrc_56f215
+ gfx12_vsrc_6802ce
+ gfx12_vsrc_89fd7b
+ gfx12_vsrc_e016a1
+ gfx12_vsrc_fd235e
+ gfx12_vsrcx1
+ gfx12_vsrcy1
+ gfx12_clause
+ gfx12_delay
+ gfx12_hwreg
+ gfx12_imm16
+ gfx12_label
+ gfx12_sendmsg
+ gfx12_sendmsg_rtn
+ gfx12_version
+ gfx12_waitcnt
diff --git a/llvm/docs/AMDGPU/gfx12_addr.rst b/llvm/docs/AMDGPU/gfx12_addr.rst
new file mode 100644
index 0000000..d2fc0e0
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_addr.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_addr:
+
+addr
+====
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_attr.rst b/llvm/docs/AMDGPU/gfx12_attr.rst
new file mode 100644
index 0000000..a6c5c27
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_attr.rst
@@ -0,0 +1,28 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_attr:
+
+attr
+====
+
+Interpolation attribute and channel:
+
+ ============== ===================================
+ Syntax Description
+ ============== ===================================
+ attr{0..32}.x Attribute 0..32 with *x* channel.
+ attr{0..32}.y Attribute 0..32 with *y* channel.
+ attr{0..32}.z Attribute 0..32 with *z* channel.
+ attr{0..32}.w Attribute 0..32 with *w* channel.
+ ============== ===================================
+
+Examples:
+
+.. parsed-literal::
+
+ ds_param_load v1, attr0.x
diff --git a/llvm/docs/AMDGPU/gfx12_clause.rst b/llvm/docs/AMDGPU/gfx12_clause.rst
new file mode 100644
index 0000000..88feb3b
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_clause.rst
@@ -0,0 +1,7 @@
+.. _amdgpu_synid_clause:
+
+clause
+======
+
+Description of a clause following this instruction.
+
diff --git a/llvm/docs/AMDGPU/gfx12_data0_56f215.rst b/llvm/docs/AMDGPU/gfx12_data0_56f215.rst
new file mode 100644
index 0000000..d8dde00
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_data0_56f215.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_data0_56f215:
+
+data0
+=====
+
+Instruction input.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst b/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst
new file mode 100644
index 0000000..02fe36f
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_data0_6802ce:
+
+data0
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst b/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst
new file mode 100644
index 0000000..914715b
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_data0_e016a1:
+
+data0
+=====
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst b/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst
new file mode 100644
index 0000000..7617c61
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_data0_fd235e:
+
+data0
+=====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst
new file mode 100644
index 0000000..318db2d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_data1_6802ce:
+
+data1
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_data1_731030.rst b/llvm/docs/AMDGPU/gfx12_data1_731030.rst
new file mode 100644
index 0000000..1a6eda6
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_data1_731030.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_data1_731030:
+
+data1
+=====
+
+Instruction input.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst b/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst
new file mode 100644
index 0000000..dee4148
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_data1_e016a1:
+
+data1
+=====
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst
new file mode 100644
index 0000000..c8d4a88
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_data1_fd235e:
+
+data1
+=====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_delay.rst b/llvm/docs/AMDGPU/gfx12_delay.rst
new file mode 100644
index 0000000..600ece7
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_delay.rst
@@ -0,0 +1,74 @@
+.. _amdgpu_synid_delay:
+
+delay
+=====
+
+A delay between dependent SALU/VALU instructions.
+This operand may specify delays for two instructions:
+the one immediately following the current *s_delay_alu* instruction
+and a second instruction indicated by *SKIP*.
+
+The bits of this operand have the following meaning:
+
+ ===== ========================================================== ============
+ Bits Description Value Range
+ ===== ========================================================== ============
+ 3:0 ID0: indicates a delay for the first instruction. 0..11
+ 6:4 SKIP: indicates the position of the second instruction. 0..5
+ 10:7 ID1: indicates a delay for the second instruction. 0..11
+ ===== ========================================================== ============
+
+This operand may be specified as one of the following:
+
+* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF.
+* A combination of *instid0*, *instskip*, *instid1* values described below.
+
+ ======================== =========================== ===============
+ Syntax Description Default Value
+ ======================== =========================== ===============
+ instid0(<*ID name*>) A symbolic *ID0* value. instid0(NO_DEP)
+ instskip(<*SKIP name*>) A symbolic *SKIP* value. instskip(SAME)
+ instid1(<*ID name*>) A symbolic *ID1* value. instid1(NO_DEP)
+ ======================== =========================== ===============
+
+These values may be specified in any order.
+When more than one value is specified, the values must be separated from each other by a '|'.
+
+Valid *ID names* are defined below.
+
+ =================== ===================================================================
+ Name Description
+ =================== ===================================================================
+ NO_DEP No dependency on any prior instruction. This is the default value.
+ VALU_DEP_1 Dependency on a previous VALU instruction, 1 opcode back.
+ VALU_DEP_2 Dependency on a previous VALU instruction, 2 opcodes back.
+ VALU_DEP_3 Dependency on a previous VALU instruction, 3 opcodes back.
+ VALU_DEP_4 Dependency on a previous VALU instruction, 4 opcodes back.
+ TRANS32_DEP_1 Dependency on a previous TRANS32 instruction, 1 opcode back.
+ TRANS32_DEP_2 Dependency on a previous TRANS32 instruction, 2 opcodes back.
+ TRANS32_DEP_3 Dependency on a previous TRANS32 instruction, 3 opcodes back.
+ FMA_ACCUM_CYCLE_1 Single cycle penalty for FMA accumulation.
+ SALU_CYCLE_1 1 cycle penalty for a prior SALU instruction.
+ SALU_CYCLE_2 2 cycle penalty for a prior SALU instruction.
+ SALU_CYCLE_3 3 cycle penalty for a prior SALU instruction.
+ =================== ===================================================================
+
+Legal *SKIP names* are described in the following table.
+
+ ======== ============================================================================
+ Name Description
+ ======== ============================================================================
+ SAME Apply second dependency to the same instruction. This is the default value.
+ NEXT Apply second dependency to the next instruction.
+ SKIP_1 Skip 1 instruction then apply dependency.
+ SKIP_2 Skip 2 instructions then apply dependency.
+ SKIP_3 Skip 3 instructions then apply dependency.
+ SKIP_4 Skip 4 instructions then apply dependency.
+ ======== ============================================================================
+
+Examples:
+
+.. parsed-literal::
+
+ s_delay_alu instid0(VALU_DEP_1)
+ s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
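+
+As a cross-check of the bit layout above, the raw encoding of the second
+example can be computed by hand. This is a sketch assuming the *ID* and
+*SKIP* names take consecutive values in table order (NO_DEP = 0,
+VALU_DEP_1 = 1, ...; SAME = 0, NEXT = 1):
+
+.. parsed-literal::
+
+    // assumption: VALU_DEP_1 = 1 (ID0/ID1) and NEXT = 1 (SKIP)
+    delay_enc = 1 | (1 << 4) | (1 << 7)    // = 0x91
+    s_delay_alu delay_enc                   // same as the second example above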
diff --git a/llvm/docs/AMDGPU/gfx12_hwreg.rst b/llvm/docs/AMDGPU/gfx12_hwreg.rst
new file mode 100644
index 0000000..d99cb20
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_hwreg.rst
@@ -0,0 +1,76 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_hwreg:
+
+hwreg
+=====
+
+Bits of a hardware register being accessed.
+
+The bits of this operand have the following meaning:
+
+ ======= ===================== ============
+ Bits Description Value Range
+ ======= ===================== ============
+ 5:0 Register *id*. 0..63
+ 10:6 First bit *offset*. 0..31
+ 15:11 *Size* in bits. 1..32
+ ======= ===================== ============
+
+This operand may be specified as one of the following:
+
+* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF.
+* An *hwreg* value described below.
+
+ ==================================== ============================================================================
+ Hwreg Value Syntax Description
+ ==================================== ============================================================================
+ hwreg({0..63}) All bits of a register indicated by its *id*.
+ hwreg(<*name*>) All bits of a register indicated by its *name*.
+ hwreg({0..63}, {0..31}, {1..32}) Register bits indicated by register *id*, first bit *offset* and *size*.
+ hwreg(<*name*>, {0..31}, {1..32}) Register bits indicated by register *name*, first bit *offset* and *size*.
+ ==================================== ============================================================================
+
+Numeric values may be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>`
+or :ref:`absolute expressions<amdgpu_synid_absolute_expression>`.
+
+Defined register *names* include:
+
+ =================== ==========================================
+ Name Description
+ =================== ==========================================
+ HW_REG_MODE Shader writeable mode bits.
+ HW_REG_STATUS Shader read-only status.
+ HW_REG_TRAPSTS Trap status.
+ HW_REG_HW_ID1 Id of wave, simd, compute unit, etc.
+ HW_REG_HW_ID2 Id of queue, pipeline, etc.
+ HW_REG_GPR_ALLOC Per-wave SGPR and VGPR allocation.
+ HW_REG_LDS_ALLOC Per-wave LDS allocation.
+ HW_REG_IB_STS Counters of outstanding instructions.
+ HW_REG_SH_MEM_BASES Memory aperture.
+ HW_REG_FLAT_SCR_LO flat_scratch_lo register.
+ HW_REG_FLAT_SCR_HI flat_scratch_hi register.
+ =================== ==========================================
+
+Examples:
+
+.. parsed-literal::
+
+ reg = 1
+ offset = 2
+ size = 4
+ hwreg_enc = reg | (offset << 6) | ((size - 1) << 11)
+
+ s_getreg_b32 s2, 0x1881
+ s_getreg_b32 s2, hwreg_enc // the same as above
+ s_getreg_b32 s2, hwreg(1, 2, 4) // the same as above
+ s_getreg_b32 s2, hwreg(reg, offset, size) // the same as above
+
+ s_getreg_b32 s2, hwreg(15)
+ s_getreg_b32 s2, hwreg(51, 1, 31)
+ s_getreg_b32 s2, hwreg(HW_REG_LDS_ALLOC, 0, 1)
diff --git a/llvm/docs/AMDGPU/gfx12_imm16.rst b/llvm/docs/AMDGPU/gfx12_imm16.rst
new file mode 100644
index 0000000..44e6d58
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_imm16.rst
@@ -0,0 +1,7 @@
+.. _amdgpu_synid_imm16:
+
+imm16
+=====
+
+An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range -32768..65535.
+
diff --git a/llvm/docs/AMDGPU/gfx12_ioffset.rst b/llvm/docs/AMDGPU/gfx12_ioffset.rst
new file mode 100644
index 0000000..0901b77
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ioffset.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ioffset:
+
+ioffset
+=======
+
+*Size:* 1 dword.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_label.rst b/llvm/docs/AMDGPU/gfx12_label.rst
new file mode 100644
index 0000000..bdd6e1c
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_label.rst
@@ -0,0 +1,29 @@
+.. _amdgpu_synid_label:
+
+label
+=====
+
+A branch target which is a 16-bit signed integer treated as a PC-relative dword offset.
+
+This operand may be specified as one of the following:
+
+* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range -32768..65535.
+* A :ref:`symbol<amdgpu_synid_symbol>` (for example, a label) representing a relocatable address in the same compilation unit where it is referred from. The value is handled as a 16-bit PC-relative dword offset to be resolved by a linker.
+
+Examples:
+
+.. parsed-literal::
+
+ offset = 30
+ label_1:
+ label_2 = . + 4
+
+ s_branch 32
+ s_branch offset + 2
+ s_branch label_1
+ s_branch label_2
+ s_branch label_3
+ s_branch label_4
+
+ label_3 = label_2 + 4
+ label_4:
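+
+As a worked reading of the first numeric example, a sketch assuming the
+offset is applied relative to the instruction that follows the branch:
+
+.. parsed-literal::
+
+    // assumption: target = address of the next instruction + 32 * 4 bytes
+    s_branch 32    // jump 32 dwords (128 bytes) forward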
diff --git a/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst b/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst
new file mode 100644
index 0000000..7442c5d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_literal_1f74c7:
+
+literal
+=======
+
+*Size:* 2 dwords.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_literal_81e671.rst b/llvm/docs/AMDGPU/gfx12_literal_81e671.rst
new file mode 100644
index 0000000..ab1b056
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_literal_81e671.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_literal_81e671:
+
+literal
+=======
+
+*Size:* 1 dword.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_m.rst b/llvm/docs/AMDGPU/gfx12_m.rst
new file mode 100644
index 0000000..7cfee90
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_m.rst
@@ -0,0 +1,13 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_m:
+
+m
+=
+
+This operand may be used with floating point operand modifiers :ref:`abs<amdgpu_synid_abs>` and :ref:`neg<amdgpu_synid_neg>`.
diff --git a/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst b/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst
new file mode 100644
index 0000000..d1a475f
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_rsrc_5fe6d8:
+
+rsrc
+====
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst b/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst
new file mode 100644
index 0000000..180ae06
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_rsrc_c9f929:
+
+rsrc
+====
+
+Instruction input.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst b/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst
new file mode 100644
index 0000000..4b3511f
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_saddr_cdc95c:
+
+saddr
+=====
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst b/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst
new file mode 100644
index 0000000..d3de11d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_saddr_d42b64:
+
+saddr
+=====
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_samp.rst b/llvm/docs/AMDGPU/gfx12_samp.rst
new file mode 100644
index 0000000..2bb15e5
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_samp.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_samp:
+
+samp
+====
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst b/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst
new file mode 100644
index 0000000..54c2dee
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sbase_453b95:
+
+sbase
+=====
+
+A 128-bit buffer resource constant for scalar memory operations which provides a base address, a size and a stride.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst b/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst
new file mode 100644
index 0000000..2308b3d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sbase_47adb7:
+
+sbase
+=====
+
+A 64-bit base address for scalar memory operations.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst b/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst
new file mode 100644
index 0000000..d498f8c
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdata_0974a4:
+
+sdata
+=====
+
+Instruction output.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdata_354189.rst b/llvm/docs/AMDGPU/gfx12_sdata_354189.rst
new file mode 100644
index 0000000..c506654
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdata_354189.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdata_354189:
+
+sdata
+=====
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst b/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst
new file mode 100644
index 0000000..42f66f3
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdata_4585b8:
+
+sdata
+=====
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst b/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst
new file mode 100644
index 0000000..028461a
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdata_5c7b50:
+
+sdata
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst b/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst
new file mode 100644
index 0000000..87e19a9
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdata_6c003b:
+
+sdata
+=====
+
+Instruction output.
+
+*Size:* 16 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdata_836716.rst b/llvm/docs/AMDGPU/gfx12_sdata_836716.rst
new file mode 100644
index 0000000..be1bce9
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdata_836716.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdata_836716:
+
+sdata
+=====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst b/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst
new file mode 100644
index 0000000..c882df8
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdata_d725ab:
+
+sdata
+=====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`simm8<amdgpu_synid_simm8>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst b/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst
new file mode 100644
index 0000000..6465889
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdata_dd9dd8:
+
+sdata
+=====
+
+Instruction output.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst b/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst
new file mode 100644
index 0000000..f269b05
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdst_006c40:
+
+sdst
+====
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`vcc<amdgpu_synid_vcc>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst b/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst
new file mode 100644
index 0000000..83c11a2
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdst_20064d:
+
+sdst
+====
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdst_354189.rst b/llvm/docs/AMDGPU/gfx12_sdst_354189.rst
new file mode 100644
index 0000000..8433406
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdst_354189.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdst_354189:
+
+sdst
+====
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdst_836716.rst b/llvm/docs/AMDGPU/gfx12_sdst_836716.rst
new file mode 100644
index 0000000..abce569
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdst_836716.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdst_836716:
+
+sdst
+====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst b/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst
new file mode 100644
index 0000000..e0072d9
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdst_ced58d:
+
+sdst
+====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst b/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst
new file mode 100644
index 0000000..33e8c37
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_sdst_e701cc:
+
+sdst
+====
+
+Instruction output.
+
+*Size:* 1 dword if wavefront size is 32, otherwise 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_sendmsg.rst b/llvm/docs/AMDGPU/gfx12_sendmsg.rst
new file mode 100644
index 0000000..cb51be0
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sendmsg.rst
@@ -0,0 +1,48 @@
+.. _amdgpu_synid_sendmsg:
+
+sendmsg
+=======
+
+An 8-bit value in simm16[7:0] encodes the message type.
+
+This operand may be specified as one of the following:
+
+* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF.
+* A *sendmsg* value described below.
+
+
+ ==================================== ====================================================
+ Sendmsg Value Syntax Description
+ ==================================== ====================================================
+ sendmsg(<*type*>) A message identified by its *type*.
+ ==================================== ====================================================
+
+*Type* may be specified using message *name* or message *id*.
+
+Numeric values may be specified as positive :ref:`integer numbers<amdgpu_synid_integer_number>`
+or :ref:`absolute expressions<amdgpu_synid_absolute_expression>`.
+
+
+Only the following message types are valid.
+
+ ====================== ===========
+ Message type simm16[7:0]
+ ====================== ===========
+ Reserved 0
+ MSG_INTERRUPT 1
+ MSG_HS_TESSFACTOR 2
+ MSG_DEALLOC_VGPRS 3
+ MSG_GS_ALLOC_REQ 9
+ ====================== ===========
+
+Examples:
+
+.. parsed-literal::
+
+ // numeric message code
+ msg = 0x1
+ s_sendmsg 0x3
+ s_sendmsg msg + 2
+
+ // sendmsg with strict arguments validation
+ s_sendmsg sendmsg(MSG_INTERRUPT)
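+
+Note that both numeric forms above evaluate to 3 (msg + 2 = 0x3), which
+the table maps to MSG_DEALLOC_VGPRS, so they are equivalent to the
+symbolic form:
+
+.. parsed-literal::
+
+    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)    // same encoding as s_sendmsg 0x3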
diff --git a/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst b/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst
new file mode 100644
index 0000000..ebb591d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst
@@ -0,0 +1,30 @@
+.. _amdgpu_synid_sendmsg_rtn:
+
+sendmsg_rtn
+===========
+
+An 8-bit value in the instruction encodes the message type.
+
+This operand may be specified as one of the following:
+
+ * An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF.
+ * A *sendmsg* value described below.
+
+ ==================================== ====================================================
+ Sendmsg Value Syntax Description
+ ==================================== ====================================================
+ sendmsg(MSG_RTN_GET_DOORBELL) Get doorbell ID.
+ sendmsg(MSG_RTN_GET_DDID) Get Draw/Dispatch ID.
+ sendmsg(MSG_RTN_GET_TMA) Get TMA value.
+ sendmsg(MSG_RTN_GET_TBA) Get TBA value.
+ sendmsg(MSG_RTN_GET_REALTIME) Get REALTIME value.
+ sendmsg(MSG_RTN_SAVE_WAVE) Report that this wave is ready to be context-saved.
+ ==================================== ====================================================
+
+Examples:
+
+.. parsed-literal::
+
+ s_sendmsg_rtn_b32 s0, 132
+    s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_REALTIME)
+
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst b/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst
new file mode 100644
index 0000000..0cb1233
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_15ccdd:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`version<amdgpu_synid_version>`
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst b/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst
new file mode 100644
index 0000000..e08605e
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_218bea:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`waitcnt<amdgpu_synid_waitcnt>`
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst b/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst
new file mode 100644
index 0000000..babb4b6
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_39b593:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`imm16<amdgpu_synid_imm16>`
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst b/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst
new file mode 100644
index 0000000..cc8dbc6
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_3d2a4f:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`label<amdgpu_synid_label>`
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst b/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst
new file mode 100644
index 0000000..93596db
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_730a13:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`clause<amdgpu_synid_clause>`
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst b/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst
new file mode 100644
index 0000000..fc63930
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_7ed651:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`hwreg<amdgpu_synid_hwreg>`
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst b/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst
new file mode 100644
index 0000000..16dcf39
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_81e671:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst b/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst
new file mode 100644
index 0000000..03e007af
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_c98889:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`delay<amdgpu_synid_delay>`
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst b/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst
new file mode 100644
index 0000000..e53f812
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_cc1716:
+
+simm16
+======
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`hwreg<amdgpu_synid_hwreg>`
diff --git a/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst b/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst
new file mode 100644
index 0000000..9bdac9b
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_simm16_ee8b30:
+
+simm16
+======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`sendmsg<amdgpu_synid_sendmsg>`
diff --git a/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst b/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst
new file mode 100644
index 0000000..44de030
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_soffset_8ec073:
+
+soffset
+=======
+
+An unsigned 20-bit offset added to the base address to get the memory address.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst b/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst
new file mode 100644
index 0000000..d115150
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_soffset_c5b88c:
+
+soffset
+=======
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst b/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst
new file mode 100644
index 0000000..bd571b6
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst
@@ -0,0 +1,20 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_soffset_ec005a:
+
+soffset
+=======
+
+An offset added to the base address to get the memory address.
+
+* If offset is specified as a register, it supplies an unsigned byte offset.
+* If offset is specified as a 21-bit immediate, it supplies a signed byte offset.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst
new file mode 100644
index 0000000..15fde5c
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src0_5727cf:
+
+src0
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst
new file mode 100644
index 0000000..fa02f046
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src0_5cae62:
+
+src0
+====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst
new file mode 100644
index 0000000..e17a719
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src0_6802ce:
+
+src0
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst b/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst
new file mode 100644
index 0000000..effa6f6
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src0_85aab6:
+
+src0
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst
new file mode 100644
index 0000000..bbe6191
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src0_c4593f:
+
+src0
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst
new file mode 100644
index 0000000..c2d23d7
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src0_e016a1:
+
+src0
+====
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst b/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst
new file mode 100644
index 0000000..dc048af
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src0_fd235e:
+
+src0
+====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst
new file mode 100644
index 0000000..d1d0837
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src1_5727cf:
+
+src1
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst
new file mode 100644
index 0000000..3ad591c
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src1_5cae62:
+
+src1
+====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst
new file mode 100644
index 0000000..84ff631
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src1_6802ce:
+
+src1
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_src1_731030.rst b/llvm/docs/AMDGPU/gfx12_src1_731030.rst
new file mode 100644
index 0000000..8c67699
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src1_731030.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src1_731030:
+
+src1
+====
+
+Instruction input.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_src1_977794.rst b/llvm/docs/AMDGPU/gfx12_src1_977794.rst
new file mode 100644
index 0000000..7651340
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src1_977794.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src1_977794:
+
+src1
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst
new file mode 100644
index 0000000..aba4da8
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src1_c4593f:
+
+src1
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst
new file mode 100644
index 0000000..4385853
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src1_e016a1:
+
+src1
+====
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst
new file mode 100644
index 0000000..5863e93
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src1_fd235e:
+
+src1
+====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst b/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst
new file mode 100644
index 0000000..b393e2a
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src2_2797bc:
+
+src2
+====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst
new file mode 100644
index 0000000..9ffaa079
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src2_5727cf:
+
+src2
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst
new file mode 100644
index 0000000..46d65cb
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src2_5cae62:
+
+src2
+====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst
new file mode 100644
index 0000000..0ad2ede
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src2_6802ce:
+
+src2
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst b/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst
new file mode 100644
index 0000000..9f1ea3c
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src2_7b936a:
+
+src2
+====
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`fconst<amdgpu_synid_fconst>`
diff --git a/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst b/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst
new file mode 100644
index 0000000..884d089
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src2_96fbd3:
+
+src2
+====
+
+Instruction input.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`fconst<amdgpu_synid_fconst>`
diff --git a/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst
new file mode 100644
index 0000000..849230b
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src2_c4593f:
+
+src2
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst
new file mode 100644
index 0000000..266c4ea
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_src2_e016a1:
+
+src2
+====
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_srcx0.rst b/llvm/docs/AMDGPU/gfx12_srcx0.rst
new file mode 100644
index 0000000..57b05a1
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_srcx0.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_srcx0:
+
+srcx0
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_srcy0.rst b/llvm/docs/AMDGPU/gfx12_srcy0.rst
new file mode 100644
index 0000000..350b742
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_srcy0.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_srcy0:
+
+srcy0
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`, :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst
new file mode 100644
index 0000000..c3f33e4f
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ssrc0_007f9c:
+
+ssrc0
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst
new file mode 100644
index 0000000..5aa3f2d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ssrc0_1a9ca5:
+
+ssrc0
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`m0<amdgpu_synid_m0>`
diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst
new file mode 100644
index 0000000..36925da
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ssrc0_245536:
+
+ssrc0
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`sendmsg_rtn<amdgpu_synid_sendmsg_rtn>`
diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst
new file mode 100644
index 0000000..4eae705
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ssrc0_2797bc:
+
+ssrc0
+=====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`
diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst
new file mode 100644
index 0000000..a29f83d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ssrc0_bbb4c6:
+
+ssrc0
+=====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst
new file mode 100644
index 0000000..33ca4d6
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ssrc0_c4593f:
+
+ssrc0
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst b/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst
new file mode 100644
index 0000000..1f3ea34
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ssrc1_bbb4c6:
+
+ssrc1
+=====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`
diff --git a/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst b/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst
new file mode 100644
index 0000000..f81d0f2
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_ssrc1_c4593f:
+
+ssrc1
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`m0<amdgpu_synid_m0>`, :ref:`scc<amdgpu_synid_scc>`, :ref:`fconst<amdgpu_synid_fconst>`, :ref:`literal<amdgpu_synid_literal>`, :ref:`exec_hi<amdgpu_synid_exec_hi>`, :ref:`exec_lo<amdgpu_synid_exec_lo>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_tgt.rst b/llvm/docs/AMDGPU/gfx12_tgt.rst
new file mode 100644
index 0000000..83a25aa
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_tgt.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_tgt:
+
+tgt
+===
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst b/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst
new file mode 100644
index 0000000..223b50d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vaddr_a972b9:
+
+vaddr
+=====
+
+*Size:* 11 dwords.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst b/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst
new file mode 100644
index 0000000..5a93efe
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vaddr_c12f43:
+
+vaddr
+=====
+
+*Size:* 12 dwords.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst b/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst
new file mode 100644
index 0000000..1998e1d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vaddr_c8b8d4:
+
+vaddr
+=====
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst b/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst
new file mode 100644
index 0000000..92d09a2
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vaddr_d82160:
+
+vaddr
+=====
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst b/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst
new file mode 100644
index 0000000..10d7e0a
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst
@@ -0,0 +1,15 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vaddr_f2b449:
+
+vaddr
+=====
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vcc.rst b/llvm/docs/AMDGPU/gfx12_vcc.rst
new file mode 100644
index 0000000..e8509ff
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vcc.rst
@@ -0,0 +1,16 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vcc:
+
+vcc
+===
+
+Vector condition code. This operand depends on wavefront size:
+
+* Should be :ref:`vcc_lo<amdgpu_synid_vcc_lo>` if wavefront size is 32.
+* Should be :ref:`vcc<amdgpu_synid_vcc>` if wavefront size is 64.
diff --git a/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst b/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst
new file mode 100644
index 0000000..839ec86
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdata_2eda77:
+
+vdata
+=====
+
+Instruction output.
+
+*Size:* 32 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst b/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst
new file mode 100644
index 0000000..d2ab49a
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdata_48e42f:
+
+vdata
+=====
+
+Instruction output.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst b/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst
new file mode 100644
index 0000000..22ac087
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdata_69a144:
+
+vdata
+=====
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst b/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst
new file mode 100644
index 0000000..5f4f478
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdata_89680f:
+
+vdata
+=====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst b/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst
new file mode 100644
index 0000000..2e285ef
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdata_aac3e8:
+
+vdata
+=====
+
+Instruction output.
+
+*Size:* 10 dwords.
+
+*Operands:*
diff --git a/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst b/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst
new file mode 100644
index 0000000..109c767
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdata_bdb32f:
+
+vdata
+=====
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst b/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst
new file mode 100644
index 0000000..dc3ac95
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_006c40:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`vcc<amdgpu_synid_vcc>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_227281.rst b/llvm/docs/AMDGPU/gfx12_vdst_227281.rst
new file mode 100644
index 0000000..13fd951
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_227281.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_227281:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 4 dwords if wavefront size is 64, otherwise 8 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst b/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst
new file mode 100644
index 0000000..9372e48
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_2eda77:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 32 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst b/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst
new file mode 100644
index 0000000..056fe3f
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_47d3bc:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 8 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst b/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst
new file mode 100644
index 0000000..84ab35b
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_48e42f:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst b/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst
new file mode 100644
index 0000000..70873ff
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_69a144:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst b/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst
new file mode 100644
index 0000000..7248ea9
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_7de8e7:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`exec<amdgpu_synid_exec>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_836716.rst b/llvm/docs/AMDGPU/gfx12_vdst_836716.rst
new file mode 100644
index 0000000..1cd43ee9
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_836716.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_836716:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`s<amdgpu_synid_s>`, :ref:`ttmp<amdgpu_synid_ttmp>`, :ref:`null<amdgpu_synid_null>`, :ref:`vcc_hi<amdgpu_synid_vcc_hi>`, :ref:`vcc_lo<amdgpu_synid_vcc_lo>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst b/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst
new file mode 100644
index 0000000..b4f055c
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_89680f:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst b/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst
new file mode 100644
index 0000000..e2a4a47
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdst_bdb32f:
+
+vdst
+====
+
+Instruction output.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdstx.rst b/llvm/docs/AMDGPU/gfx12_vdstx.rst
new file mode 100644
index 0000000..4b95d4d
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdstx.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdstx:
+
+vdstx
+=====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vdsty.rst b/llvm/docs/AMDGPU/gfx12_vdsty.rst
new file mode 100644
index 0000000..cf0b464
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vdsty.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vdsty:
+
+vdsty
+=====
+
+Instruction output.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_version.rst b/llvm/docs/AMDGPU/gfx12_version.rst
new file mode 100644
index 0000000..4e490ca
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_version.rst
@@ -0,0 +1,7 @@
+.. _amdgpu_synid_version:
+
+version
+=======
+
+Microcode version header.
+
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc0.rst b/llvm/docs/AMDGPU/gfx12_vsrc0.rst
new file mode 100644
index 0000000..fb38169
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc0.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc0:
+
+vsrc0
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst
new file mode 100644
index 0000000..4490545
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc1_6802ce:
+
+vsrc1
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst
new file mode 100644
index 0000000..d6567c2
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc1_fd235e:
+
+vsrc1
+=====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc2.rst b/llvm/docs/AMDGPU/gfx12_vsrc2.rst
new file mode 100644
index 0000000..fe20832
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc2.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc2:
+
+vsrc2
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc3.rst b/llvm/docs/AMDGPU/gfx12_vsrc3.rst
new file mode 100644
index 0000000..18df9e4
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc3.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc3:
+
+vsrc3
+=====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst b/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst
new file mode 100644
index 0000000..166da38
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc_56f215:
+
+vsrc
+====
+
+Instruction input.
+
+*Size:* 3 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst b/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst
new file mode 100644
index 0000000..e879c2b
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc_6802ce:
+
+vsrc
+====
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst b/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst
new file mode 100644
index 0000000..c521e72
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc_89fd7b:
+
+vsrc
+====
+
+Instruction input.
+
+*Size:* 32 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst b/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst
new file mode 100644
index 0000000..84eb2ed
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc_e016a1:
+
+vsrc
+====
+
+Instruction input.
+
+*Size:* 4 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst b/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst
new file mode 100644
index 0000000..640a235
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrc_fd235e:
+
+vsrc
+====
+
+Instruction input.
+
+*Size:* 2 dwords.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrcx1.rst b/llvm/docs/AMDGPU/gfx12_vsrcx1.rst
new file mode 100644
index 0000000..9dab58c
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrcx1.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrcx1:
+
+vsrcx1
+======
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_vsrcy1.rst b/llvm/docs/AMDGPU/gfx12_vsrcy1.rst
new file mode 100644
index 0000000..496b2d6
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_vsrcy1.rst
@@ -0,0 +1,17 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_gfx12_vsrcy1:
+
+vsrcy1
+======
+
+Instruction input.
+
+*Size:* 1 dword.
+
+*Operands:* :ref:`v<amdgpu_synid_v>`
diff --git a/llvm/docs/AMDGPU/gfx12_waitcnt.rst b/llvm/docs/AMDGPU/gfx12_waitcnt.rst
new file mode 100644
index 0000000..4541222
--- /dev/null
+++ b/llvm/docs/AMDGPU/gfx12_waitcnt.rst
@@ -0,0 +1,55 @@
+..
+ **************************************************
+ * *
+ * Automatically generated file, do not edit! *
+ * *
+ **************************************************
+
+.. _amdgpu_synid_waitcnt:
+
+waitcnt
+=======
+
+Counts of outstanding instructions to wait for.
+
+The bits of this operand have the following meaning:
+
+ ===== ================================================ ============
+ Bits Description Value Range
+ ===== ================================================ ============
+ 2:0 EXP_CNT: export and LDSDIR count. 0..7
+ 3:3 Unused \-
+ 9:4 LGKM_CNT: LDS, GDS, Constant and Message count. 0..63
+ 15:10 VM_CNT: vector memory operations count. 0..63
+ ===== ================================================ ============
+
+This operand may be specified as one of the following:
+
+* An :ref:`integer_number<amdgpu_synid_integer_number>` or an :ref:`absolute_expression<amdgpu_synid_absolute_expression>`. The value must be in the range 0..0xFFFF.
+* A combination of *vmcnt*, *expcnt*, *lgkmcnt* and other values described below.
+
+ ====================== ======================================================================
+ Syntax Description
+ ====================== ======================================================================
+ vmcnt(<*N*>) A VM_CNT value. *N* must not exceed the largest VM_CNT value.
+ expcnt(<*N*>) An EXP_CNT value. *N* must not exceed the largest EXP_CNT value.
+ lgkmcnt(<*N*>) An LGKM_CNT value. *N* must not exceed the largest LGKM_CNT value.
+ vmcnt_sat(<*N*>) A VM_CNT value computed as min(*N*, the largest VM_CNT value).
+ expcnt_sat(<*N*>) An EXP_CNT value computed as min(*N*, the largest EXP_CNT value).
+ lgkmcnt_sat(<*N*>) An LGKM_CNT value computed as min(*N*, the largest LGKM_CNT value).
+ ====================== ======================================================================
+
+These values may be specified in any order. Spaces, ampersands and commas may be used as optional separators.
+
+*N* is either an
+:ref:`integer number<amdgpu_synid_integer_number>` or an
+:ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+
+Examples:
+
+.. parsed-literal::
+
+ s_waitcnt vmcnt(1)
+ s_waitcnt expcnt(2) lgkmcnt(3)
+ s_waitcnt vmcnt(1), expcnt(2), lgkmcnt(3)
+ s_waitcnt vmcnt(1) & lgkmcnt_sat(100) & expcnt(2)
diff --git a/llvm/docs/AMDGPUModifierSyntax.rst b/llvm/docs/AMDGPUModifierSyntax.rst
index 334bdaf..8a60663 100644
--- a/llvm/docs/AMDGPUModifierSyntax.rst
+++ b/llvm/docs/AMDGPUModifierSyntax.rst
@@ -1078,6 +1078,73 @@ Examples:
offset:0xfffff
offset:-x
+.. _amdgpu_synid_smem_offset24s:
+
+offset24s
+~~~~~~~~~
+
+Specifies a signed 24-bit offset, in bytes. The default value is 0.
+
+ ============================= ====================================================================
+ Syntax Description
+ ============================= ====================================================================
+ offset:{-0x1000000..0xFFFFFF} Specifies an offset as an
+ :ref:`integer number <amdgpu_synid_integer_number>`
+ or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+ ============================= ====================================================================
+
+Examples:
+
+.. parsed-literal::
+
+ offset:-1
+ offset:0xfffff
+ offset:-x
+
+.. _amdgpu_synid_th:
+
+th
+~~
+
+Specifies the temporal hint of a memory operation.
+
+ =============================== =========================================================
+ Syntax Description
+ =============================== =========================================================
+ TH_{LOAD|STORE}_RT Regular
+ TH_{LOAD|STORE}_NT Non-temporal
+ TH_{LOAD|STORE}_HT High-temporal
+ TH_{LOAD|STORE}_LU Last use. Not available in SYS scope.
+ TH_{LOAD|STORE}_WB Regular (CU, SE); High-temporal with write-back (MALL)
+ TH_{LOAD|STORE}_NT_RT Non-temporal (CU, SE); Regular (MALL)
+ TH_{LOAD|STORE}_RT_NT Regular (CU, SE); Non-temporal (MALL)
+ TH_{LOAD|STORE}_NT_HT Non-temporal (CU, SE); High-temporal (MALL)
+ TH_{LOAD|STORE}_NT_WB Non-temporal (CU, SE); High-temporal with write-back (MALL)
+ TH_{LOAD|STORE}_BYPASS Available for SYS scope only.
+ TH_ATOMIC_RT Regular
+ TH_ATOMIC_RT_RETURN Regular. For atomic instructions that return values.
+ TH_ATOMIC_NT Non-temporal
+ TH_ATOMIC_NT_RETURN Non-temporal. For atomic instructions that return values.
+ TH_ATOMIC_CASCADE_RT Cascading atomic; Regular.
+ TH_ATOMIC_CASCADE_NT Cascading atomic; Non-temporal.
+ =============================== =========================================================
+
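+For example (the instructions shown are illustrative; any GFX12 memory
+instruction that accepts ``th`` may be used):
+
+.. parsed-literal::
+
+    global_load_b32 v1, v[2:3], off th:TH_LOAD_NT
+    global_store_b32 v[2:3], v1, off th:TH_STORE_NT_WB
+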
+.. _amdgpu_synid_scope:
+
+scope
+~~~~~
+
+Specifies the scope of a memory operation.
+
+ =============================== =========================================================
+ Syntax Description
+ =============================== =========================================================
+ SCOPE_CU Coherency within a Compute Unit.
+ SCOPE_SE Coherency within a Shader Engine.
+ SCOPE_DEV Coherency within a single device.
+ SCOPE_SYS Coherency across the full system.
+ =============================== =========================================================
+
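+For example (illustrative; ``th`` and ``scope`` are typically combined on the
+same memory instruction):
+
+.. parsed-literal::
+
+    global_load_b32 v1, v[2:3], off th:TH_LOAD_NT scope:SCOPE_SYS
+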
VINTRP/VINTERP/LDSDIR Modifiers
-------------------------------
@@ -1117,6 +1184,27 @@ The default value is zero. This is a safe value, but it may be suboptimal.
issuing this instruction.
================ ======================================================
+.. _amdgpu_synid_wait_va_vdst:
+
+wait_va_vdst
+~~~~~~~~~~~~
+
+Manually specify a wait on the VA_VDST counter before issuing this instruction. VA_VDST must be less
+than or equal to this value before the instruction is issued. If set to 15, no wait is performed.
+
+If unspecified, the default value is zero. This is a safe value, but it may be suboptimal.
+
+This modifier addresses the WAR hazard in which a VALU instruction reads a VGPR that is later written
+by a parameter load. Since there is no VA_VSRC counter, VA_VDST is used as a proxy to detect when the
+VALU instruction has completed.
+
+Examples:
+
+.. parsed-literal::
+
+ v_mov_b32 v1, v0
+ ds_param_load v0, . . . wait_va_vdst:0
+
.. _amdgpu_synid_wait_vdst:
wait_vdst
@@ -1135,6 +1223,27 @@ The default value is zero. This is a safe value, but it may be suboptimal.
issuing this instruction.
================== ======================================================
+.. _amdgpu_synid_wait_vm_vsrc:
+
+wait_vm_vsrc
+~~~~~~~~~~~~
+
+Manually specify a wait on the VM_VSRC counter before issuing this instruction. VM_VSRC must be less
+than or equal to this value before the instruction is issued. If set to 1, no wait is performed.
+
+If unspecified, the default value is zero. This is a safe value, but it may be suboptimal.
+
+This modifier addresses the WAR hazard in which a VMEM instruction reads a VGPR that is later written
+by a parameter load.
+
+Examples:
+
+.. parsed-literal::
+
+ buffer_load_b32 v1, v0, s0, 0
+ ds_param_load v0, . . . wait_vm_vsrc:0
+
DPP8 Modifiers
--------------
diff --git a/llvm/docs/AMDGPUOperandSyntax.rst b/llvm/docs/AMDGPUOperandSyntax.rst
index e8a7632..722290f 100644
--- a/llvm/docs/AMDGPUOperandSyntax.rst
+++ b/llvm/docs/AMDGPUOperandSyntax.rst
@@ -479,6 +479,7 @@ High and low 32 bits of *xnack mask* may be accessed as separate registers:
.. _amdgpu_synid_vcc:
.. _amdgpu_synid_vcc_lo:
+.. _amdgpu_synid_vcc_hi:
vcc
---
@@ -523,6 +524,8 @@ including register indexing and bounds checking.
=========== ===================================================
.. _amdgpu_synid_exec:
+.. _amdgpu_synid_exec_lo:
+.. _amdgpu_synid_exec_hi:
exec
----
@@ -752,6 +755,14 @@ or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`.
The value must be in the range -0x100000..0x0FFFFF.
+.. _amdgpu_synid_simm8:
+
+simm8
+-----
+
+An 8-bit :ref:`integer number<amdgpu_synid_integer_number>`
+or an :ref:`absolute expression<amdgpu_synid_absolute_expression>`.
+
.. _amdgpu_synid_off:
off
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 74b7604..a4d110f 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -22,6 +22,7 @@ User Guide for AMDGPU Backend
AMDGPU/AMDGPUAsmGFX1013
AMDGPU/AMDGPUAsmGFX1030
AMDGPU/AMDGPUAsmGFX11
+ AMDGPU/AMDGPUAsmGFX12
AMDGPUModifierSyntax
AMDGPUOperandSyntax
AMDGPUInstructionSyntax
@@ -19908,6 +19909,7 @@ in this description.
:doc:`gfx1102<AMDGPU/AMDGPUAsmGFX11>`
:doc:`gfx1103<AMDGPU/AMDGPUAsmGFX11>`
+ RDNA 4 :doc:`GFX12<AMDGPU/AMDGPUAsmGFX12>` :doc:`gfx1200<AMDGPU/AMDGPUAsmGFX12>`
============= ============================================= =======================================
For more information about instructions, their semantics and supported
diff --git a/llvm/docs/DirectXUsage.rst b/llvm/docs/DirectXUsage.rst
index 1d964e6..78f27d8 100644
--- a/llvm/docs/DirectXUsage.rst
+++ b/llvm/docs/DirectXUsage.rst
@@ -29,7 +29,7 @@ Initially the backend is aimed at supporting DirectX 12, and support for DirectX
11 is planned at a later date.
The DirectX backend is currently experimental and is not shipped with any
-release builds of LLVM tools. To enable building the DirectX backend locally add
+release builds of LLVM tools. To build the DirectX backend locally, add
``DirectX`` to the ``LLVM_EXPERIMENTAL_TARGETS_TO_BUILD`` CMake option. For more
information on building LLVM see the :doc:`CMake` documentation.
@@ -38,7 +38,7 @@ information on building LLVM see the :doc:`CMake` documentation.
Target Triples
==============
-At present the DirectX target only supports the ``dxil`` architecture, which
+At present, the DirectX target only supports the ``dxil`` architecture, which
generates code for the
`DirectX Intermediate Language. <https://github.com/microsoft/DirectXShaderCompiler/blob/main/docs/DXIL.rst>`_
@@ -46,8 +46,8 @@ In addition to target architecture, the DirectX backend also needs to know the
target runtime version and pipeline stage. These are expressed using the OS and
Environment triple component.
-Presently the DirectX backend requires targeting the ``shadermodel`` OS, and
-supports versions 6.0+ (at time of writing the latest announced version is 6.7).
+Presently, the DirectX backend requires targeting the ``shadermodel`` OS, and
+supports versions 6.0+ (as of writing, the latest announced version is 6.7).
.. table:: DirectX Environments
diff --git a/llvm/docs/GettingInvolved.rst b/llvm/docs/GettingInvolved.rst
index f7e1374..4b4b09a 100644
--- a/llvm/docs/GettingInvolved.rst
+++ b/llvm/docs/GettingInvolved.rst
@@ -213,6 +213,16 @@ what to add to your calendar invite.
- `ics <https://calendar.google.com/calendar/ical/c_fe5774fa2769c5085d6b87e8fac272e8940e7d0089bc0e0a58dc3ead7978504b%40group.calendar.google.com/public/basic.ics>`__
`gcal <https://calendar.google.com/calendar/embed?src=c_fe5774fa2769c5085d6b87e8fac272e8940e7d0089bc0e0a58dc3ead7978504b%40group.calendar.google.com&ctz=Asia%2FTokyo>`__
- `Minutes/docs <https://discourse.llvm.org/t/llvm-qualification-wg-sync-ups-meeting-minutes/87148>`__
+ * - MLIR C/C++ Frontend Working Group
+ - Monthly, usually 1st Monday of the month
+ - `ics <https://calendar.google.com/calendar/ical/jvceakm3kbpku3f4jrsv1lkigo%40group.calendar.google.com/public/basic.ics>`__
+ `gcal <https://calendar.google.com/calendar/embed?src=jvceakm3kbpku3f4jrsv1lkigo%40group.calendar.google.com&ctz=America%2FLos_Angeles>`__
+ - `Minutes/docs <https://docs.google.com/document/d/1-flHK3TjQUrkSO2Fdt4webZ2zCyeXxpTLMiRQbMW7hE>`__
+ * - ClangIR Upstreaming Coordination Meeting
+ - Every 2 weeks on Mondays
+ - `ics <https://calendar.google.com/calendar/ical/c_673c6cd64474c0aff173bf8fa609559f93d654e0984d9d91d71abd32d28c0486%40group.calendar.google.com/public/basic.ics>`__
+ `gcal <https://calendar.google.com/calendar/embed?src=c_673c6cd64474c0aff173bf8fa609559f93d654e0984d9d91d71abd32d28c0486%40group.calendar.google.com&ctz=America%2FLos_Angeles>`__
+ -
For event owners, our Discord bot also supports sending automated announcements
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 8e86393..22b58bf 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -1489,6 +1489,8 @@ Currently, only the following parameter attributes are defined:
function, returning a pointer to allocated storage disjoint from the
storage for any other object accessible to the caller.
+.. _captures_attr:
+
``captures(...)``
This attribute restricts the ways in which the callee may capture the
pointer. This is not a valid attribute for return values. This attribute
@@ -7543,6 +7545,33 @@ The number of bytes known to be dereferenceable is specified by the integer
value in the metadata node. This is analogous to the ''dereferenceable_or_null''
attribute on parameters and return values.
+'``captures``' Metadata
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``!captures`` metadata can only be applied to ``store`` instructions with
+a pointer-typed value operand. It restricts the capturing behavior of the store
+value operand in the same way the ``captures(...)`` attribute would do on a
+call. See the :ref:`pointer capture section <pointercapture>` for a detailed
+discussion of capture semantics.
+
+The ``!captures`` metadata accepts a non-empty list of strings from the same
+set as the :ref:`captures attribute <captures_attr>`:
+``!"address"``, ``!"address_is_null"``, ``!"provenance"`` and
+``!"read_provenance"``. ``!"none"`` is not supported.
+
+For example, ``store ptr %x, ptr %y, !captures !{!"address"}`` indicates that
+the copy of pointer ``%x`` stored to location ``%y`` will only be used to
+inspect its integral address value, and not dereferenced. Dereferencing the
+pointer would result in undefined behavior.
+
+Similarly, ``store ptr %x, ptr %y, !captures !{!"address", !"read_provenance"}``
+indicates that while reads through the stored pointer are allowed, writes would
+result in undefined behavior.
+
+The ``!captures`` metadata makes no statement about other uses of ``%x``, or
+uses of the stored-to memory location after it has been overwritten with a
+different value.
+
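A minimal consumer sketch (not part of this patch) of how the metadata could be read back in C++, via the ``MDNode::toCaptureComponents`` helper and the ``LLVMContext::MD_captures`` fixed kind that this same patch introduces; the function name and the write-restriction query are hypothetical:

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/Support/ModRef.h" // CaptureComponents, capturesFullProvenance

    using namespace llvm;

    // Hypothetical helper: true if the stored pointer copy is known not to
    // carry full provenance, i.e. writing through it would be UB.
    static bool storeRestrictsWrites(const StoreInst *SI) {
      // getMetadata returns nullptr when no !captures is attached; per its
      // doc comment, toCaptureComponents accepts a null node.
      const MDNode *MD = SI->getMetadata(LLVMContext::MD_captures);
      CaptureComponents CC = MDNode::toCaptureComponents(MD);
      return !capturesFullProvenance(CC);
    }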
.. _llvm.loop:
'``llvm.loop``'
diff --git a/llvm/docs/TableGen/ProgRef.rst b/llvm/docs/TableGen/ProgRef.rst
index 2b1af05..0ff4cc7 100644
--- a/llvm/docs/TableGen/ProgRef.rst
+++ b/llvm/docs/TableGen/ProgRef.rst
@@ -64,7 +64,7 @@ Classes and concrete records have a unique *name*, either chosen by
the programmer or generated by TableGen. Associated with that name
is a list of *fields* with values and an optional list of *parent classes*
(sometimes called base or super classes). The fields are the primary data that
-backends will process. Note that TableGen assigns no meanings to fields; the
+backends will process. Note that TableGen assigns no meaning to fields; the
meanings are entirely up to the backends and the programs that incorporate
the output of those backends.
@@ -243,7 +243,7 @@ Include files
-------------
TableGen has an include mechanism. The content of the included file
-lexically replaces the ``include`` directive and is then parsed as if it was
+lexically replaces the ``include`` directive and is then parsed as if it were
originally in the main file.
.. productionlist::
@@ -670,17 +670,17 @@ name of a multiclass.
The argument values can be specified in two forms:
* Positional argument (``value``). The value is assigned to the argument in the
- corresponding position. For ``Foo<a0, a1>``, ``a0`` will be assigned to first
- argument and ``a1`` will be assigned to second argument.
+ corresponding position. For ``Foo<a0, a1>``, ``a0`` will be assigned to the first
+ argument and ``a1`` will be assigned to the second argument.
* Named argument (``name=value``). The value is assigned to the argument with
the specified name. For ``Foo<a=a0, b=a1>``, ``a0`` will be assigned to the
argument with name ``a`` and ``a1`` will be assigned to the argument with
name ``b``.
-Required arguments can also be specified as named argument.
+Required arguments can also be specified as a named argument.
Note that the argument can only be specified once regardless of the way (named
-or positional) to specify and positional arguments should be put before named
+or positional) used to specify it, and positional arguments should precede named
arguments.
.. productionlist::
@@ -817,7 +817,7 @@ type. It provides a single field, ``Value``, which holds a 3-bit number. Its
template argument, ``val``, is used to set the ``Value`` field. Each of the
eight records is defined with ``FPFormat`` as its parent class. The
enumeration value is passed in angle brackets as the template argument. Each
-record will inherent the ``Value`` field with the appropriate enumeration
+record will inherit the ``Value`` field with the appropriate enumeration
value.
Here is a more complex example of classes with template arguments. First, we
@@ -1308,7 +1308,7 @@ with ``F0``, ``F1``, ``F2``, and ``F3``.
-------------------------------------
A ``dump`` statement prints the input string to standard error
-output. It is intended for debugging purpose.
+output. It is intended for debugging purposes.
* At top level, the message is printed immediately.
@@ -1727,7 +1727,7 @@ and non-0 as true.
``!div(``\ *a*\ ``,`` *b*\ ``)``
This operator performs signed division of *a* by *b*, and produces the quotient.
- Division by 0 produces an error. Division of INT64_MIN by -1 produces an error.
+ Division by 0 produces an error. Division of ``INT64_MIN`` by -1 produces an error.
``!empty(``\ *a*\ ``)``
This operator produces 1 if the string, list, or DAG *a* is empty; 0 otherwise.
@@ -1914,7 +1914,7 @@ and non-0 as true.
``!or(``\ *a*\ ``,`` *b*\ ``, ...)``
This operator does a bitwise OR on *a*, *b*, etc., and produces the
result. A logical OR can be performed if all the arguments are either
- 0 or 1. This operator is short-circuit to -1 (all ones) the left-most
+ 0 or 1. This operator short-circuits to -1 (all ones) when the left-most
operand is -1.
``!range([``\ *start*\ ``,]`` *end*\ ``[,``\ *step*\ ``])``
@@ -1937,7 +1937,7 @@ and non-0 as true.
Equivalent to ``!range(0, !size(list))``.
``!repr(``\ *value*\ ``)``
- Represents *value* as a string. String format for the value is not
+ Represents *value* as a string. The string format for the value is not
guaranteed to be stable. Intended for debugging purposes only.
``!setdagarg(``\ *dag*\ ``,``\ *key*\ ``,``\ *arg*\ ``)``
diff --git a/llvm/include/llvm/ADT/BitVector.h b/llvm/include/llvm/ADT/BitVector.h
index 83350e6..9e81a4b 100644
--- a/llvm/include/llvm/ADT/BitVector.h
+++ b/llvm/include/llvm/ADT/BitVector.h
@@ -570,10 +570,7 @@ public:
template <class F, class... ArgTys>
static BitVector &apply(F &&f, BitVector &Out, BitVector const &Arg,
ArgTys const &...Args) {
- assert(llvm::all_of(
- std::initializer_list<unsigned>{Args.size()...},
- [&Arg](auto const &BV) { return Arg.size() == BV; }) &&
- "consistent sizes");
+ assert(((Arg.size() == Args.size()) && ...) && "consistent sizes");
Out.resize(Arg.size());
for (size_type I = 0, E = Arg.Bits.size(); I != E; ++I)
Out.Bits[I] = f(Arg.Bits[I], Args.Bits[I]...);
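The rewritten assert above replaces the ``std::initializer_list``/``all_of`` dance with a C++17 fold expression. A standalone sketch of the same check, with hypothetical names:

    #include <cassert>
    #include <cstddef>

    // The fold ((Arg == Rest) && ...) expands to one equality test per pack
    // element and is vacuously true for an empty pack.
    template <typename... Sizes>
    static void checkConsistentSizes(std::size_t Arg, Sizes... Rest) {
      assert(((Arg == Rest) && ...) && "consistent sizes");
    }

    int main() {
      checkConsistentSizes(std::size_t(8), std::size_t(8), std::size_t(8));
    }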
diff --git a/llvm/include/llvm/ADT/ConcurrentHashtable.h b/llvm/include/llvm/ADT/ConcurrentHashtable.h
index 6de194d..6a943c5 100644
--- a/llvm/include/llvm/ADT/ConcurrentHashtable.h
+++ b/llvm/include/llvm/ADT/ConcurrentHashtable.h
@@ -253,9 +253,8 @@ public:
OS << "\nOverall number of entries = " << OverallNumberOfEntries;
OS << "\nOverall number of non empty buckets = " << NumberOfNonEmptyBuckets;
- for (auto &BucketSize : BucketSizesMap)
- OS << "\n Number of buckets with size " << BucketSize.first << ": "
- << BucketSize.second;
+ for (auto [Size, Count] : BucketSizesMap)
+ OS << "\n Number of buckets with size " << Size << ": " << Count;
std::stringstream stream;
stream << std::fixed << std::setprecision(2)
diff --git a/llvm/include/llvm/ADT/DirectedGraph.h b/llvm/include/llvm/ADT/DirectedGraph.h
index 83c0bea..fb6b180 100644
--- a/llvm/include/llvm/ADT/DirectedGraph.h
+++ b/llvm/include/llvm/ADT/DirectedGraph.h
@@ -181,16 +181,6 @@ public:
DirectedGraph() = default;
explicit DirectedGraph(NodeType &N) : Nodes() { addNode(N); }
- DirectedGraph(const DGraphType &G) : Nodes(G.Nodes) {}
- DirectedGraph(DGraphType &&RHS) : Nodes(std::move(RHS.Nodes)) {}
- DGraphType &operator=(const DGraphType &G) {
- Nodes = G.Nodes;
- return *this;
- }
- DGraphType &operator=(const DGraphType &&G) {
- Nodes = std::move(G.Nodes);
- return *this;
- }
const_iterator begin() const { return Nodes.begin(); }
const_iterator end() const { return Nodes.end(); }
diff --git a/llvm/include/llvm/ADT/IntervalTree.h b/llvm/include/llvm/ADT/IntervalTree.h
index 918c862..d14de06 100644
--- a/llvm/include/llvm/ADT/IntervalTree.h
+++ b/llvm/include/llvm/ADT/IntervalTree.h
@@ -236,8 +236,7 @@ public:
//===----------------------------------------------------------------------===//
// Helper class template that is used by the IntervalTree to ensure that one
// instantiates it using only fundamental and/or pointer types.
-template <typename T>
-using PointTypeIsValid = std::bool_constant<std::is_fundamental<T>::value>;
+template <typename T> using PointTypeIsValid = std::is_fundamental<T>;
template <typename T>
using ValueTypeIsValid = std::bool_constant<std::is_fundamental<T>::value ||
diff --git a/llvm/include/llvm/ADT/SmallPtrSet.h b/llvm/include/llvm/ADT/SmallPtrSet.h
index e24cd641..f588a77 100644
--- a/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -476,18 +476,20 @@ public:
}
[[nodiscard]] iterator begin() const {
- if (shouldReverseIterate())
+ if constexpr (shouldReverseIterate())
return makeIterator(EndPointer() - 1);
- return makeIterator(CurArray);
+ else
+ return makeIterator(CurArray);
}
[[nodiscard]] iterator end() const { return makeIterator(EndPointer()); }
private:
/// Create an iterator that dereferences to the same place as the given pointer.
iterator makeIterator(const void *const *P) const {
- if (shouldReverseIterate())
+ if constexpr (shouldReverseIterate())
return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this);
- return iterator(P, EndPointer(), *this);
+ else
+ return iterator(P, EndPointer(), *this);
}
};
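Using ``if constexpr`` here requires ``shouldReverseIterate()`` to be usable in a constant expression; in exchange, the compiler discards the untaken branch outright rather than leaving it to the optimizer. A standalone sketch of the pattern (the configuration macro is made up):

    #include <iostream>

    // Stand-in for LLVM's shouldReverseIterate(); it must be constexpr so
    // that if constexpr can evaluate it at compile time.
    constexpr bool shouldReverseIterate() {
    #ifdef HYPOTHETICAL_REVERSE_ITERATION
      return true;
    #else
      return false;
    #endif
    }

    int main() {
      if constexpr (shouldReverseIterate())
        std::cout << "begin() points at the last slot\n";
      else
        std::cout << "begin() points at the first slot\n";
    }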
diff --git a/llvm/include/llvm/ADT/SmallVector.h b/llvm/include/llvm/ADT/SmallVector.h
index 77805f5..efae6f3 100644
--- a/llvm/include/llvm/ADT/SmallVector.h
+++ b/llvm/include/llvm/ADT/SmallVector.h
@@ -502,25 +502,22 @@ protected:
/// Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
- template<typename It1, typename It2>
+ template <typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
- // Arbitrary iterator types; just use the basic implementation.
- std::uninitialized_copy(I, E, Dest);
- }
-
- /// Copy the range [I, E) onto the uninitialized memory
- /// starting with "Dest", constructing elements into it as needed.
- template <typename T1, typename T2>
- static void uninitialized_copy(
- T1 *I, T1 *E, T2 *Dest,
- std::enable_if_t<std::is_same<std::remove_const_t<T1>, T2>::value> * =
- nullptr) {
- // Use memcpy for PODs iterated by pointers (which includes SmallVector
- // iterators): std::uninitialized_copy optimizes to memmove, but we can
- // use memcpy here. Note that I and E are iterators and thus might be
- // invalid for memcpy if they are equal.
- if (I != E)
- std::memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
+ if constexpr (std::is_pointer_v<It1> && std::is_pointer_v<It2> &&
+ std::is_same_v<
+ std::remove_const_t<std::remove_pointer_t<It1>>,
+ std::remove_pointer_t<It2>>) {
+ // Use memcpy for PODs iterated by pointers (which includes SmallVector
+ // iterators): std::uninitialized_copy optimizes to memmove, but we can
+ // use memcpy here. Note that I and E are iterators and thus might be
+ // invalid for memcpy if they are equal.
+ if (I != E)
+ std::memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
+ } else {
+ // Arbitrary iterator types; just use the basic implementation.
+ std::uninitialized_copy(I, E, Dest);
+ }
}
/// Double the size of the allocated memory, guaranteeing space for at
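The two former overloads are folded into one body whose ``if constexpr`` condition selects the ``memcpy`` fast path exactly when both iterators are raw pointers to the same element type, ignoring ``const`` on the source. A standalone sketch of that condition:

    #include <type_traits>

    // True when It1/It2 are raw pointers whose pointees match after stripping
    // const from the source pointee -- the memcpy-eligible case.
    template <typename It1, typename It2>
    constexpr bool TakesMemcpyPath =
        std::is_pointer_v<It1> && std::is_pointer_v<It2> &&
        std::is_same_v<std::remove_const_t<std::remove_pointer_t<It1>>,
                       std::remove_pointer_t<It2>>;

    static_assert(TakesMemcpyPath<const int *, int *>); // fast path
    static_assert(!TakesMemcpyPath<int *, long *>);     // generic path
    static_assert(!TakesMemcpyPath<int, int *>);        // not a pointer pair

    int main() {}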
diff --git a/llvm/include/llvm/Analysis/IR2Vec.h b/llvm/include/llvm/Analysis/IR2Vec.h
index 3671c1c..ed43f19 100644
--- a/llvm/include/llvm/Analysis/IR2Vec.h
+++ b/llvm/include/llvm/Analysis/IR2Vec.h
@@ -36,6 +36,7 @@
#define LLVM_ANALYSIS_IR2VEC_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
@@ -44,6 +45,7 @@
#include "llvm/Support/JSON.h"
#include <array>
#include <map>
+#include <optional>
namespace llvm {
@@ -143,6 +145,80 @@ public:
using InstEmbeddingsMap = DenseMap<const Instruction *, Embedding>;
using BBEmbeddingsMap = DenseMap<const BasicBlock *, Embedding>;
+/// Generic storage class for section-based vocabularies.
+/// VocabStorage provides a common foundation for storing and accessing
+/// embeddings organized into sections.
+class VocabStorage {
+private:
+ /// Section-based storage
+ std::vector<std::vector<Embedding>> Sections;
+
+ const size_t TotalSize;
+ const unsigned Dimension;
+
+public:
+ /// Default constructor creates empty storage (invalid state)
+ VocabStorage() : Sections(), TotalSize(0), Dimension(0) {}
+
+ /// Create a VocabStorage with pre-organized section data
+ VocabStorage(std::vector<std::vector<Embedding>> &&SectionData);
+
+ VocabStorage(VocabStorage &&) = default;
+ VocabStorage &operator=(VocabStorage &&) = delete;
+
+ VocabStorage(const VocabStorage &) = delete;
+ VocabStorage &operator=(const VocabStorage &) = delete;
+
+ /// Get total number of entries across all sections
+ size_t size() const { return TotalSize; }
+
+ /// Get number of sections
+ unsigned getNumSections() const {
+ return static_cast<unsigned>(Sections.size());
+ }
+
+ /// Section-based access: Storage[sectionId][localIndex]
+ const std::vector<Embedding> &operator[](unsigned SectionId) const {
+ assert(SectionId < Sections.size() && "Invalid section ID");
+ return Sections[SectionId];
+ }
+
+ /// Get vocabulary dimension
+ unsigned getDimension() const { return Dimension; }
+
+ /// Check if vocabulary is valid (has data)
+ bool isValid() const { return TotalSize > 0; }
+
+ /// Iterator support for section-based access
+ class const_iterator {
+ const VocabStorage *Storage;
+ unsigned SectionId = 0;
+ size_t LocalIndex = 0;
+
+ public:
+ const_iterator(const VocabStorage *Storage, unsigned SectionId,
+ size_t LocalIndex)
+ : Storage(Storage), SectionId(SectionId), LocalIndex(LocalIndex) {}
+
+ LLVM_ABI const Embedding &operator*() const;
+ LLVM_ABI const_iterator &operator++();
+ LLVM_ABI bool operator==(const const_iterator &Other) const;
+ LLVM_ABI bool operator!=(const const_iterator &Other) const;
+ };
+
+ const_iterator begin() const { return const_iterator(this, 0, 0); }
+ const_iterator end() const {
+ return const_iterator(this, getNumSections(), 0);
+ }
+
+ using VocabMap = std::map<std::string, Embedding>;
+ /// Parse a vocabulary section from JSON and populate the target vocabulary
+ /// map.
+ static Error parseVocabSection(StringRef Key,
+ const json::Value &ParsedVocabValue,
+ VocabMap &TargetVocab, unsigned &Dim);
+};
+
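A hypothetical construction sketch for the new class; the section contents are made-up values, and ``Embedding`` is assumed constructible from a ``std::vector<double>`` as elsewhere in this header:

    #include "llvm/Analysis/IR2Vec.h"
    #include <utility>
    #include <vector>

    using namespace llvm::ir2vec;

    // Build a two-section vocabulary with one 2-dimensional entry per
    // section. VocabStorage is move-only, so the result is moved out.
    static VocabStorage makeToyStorage() {
      std::vector<std::vector<Embedding>> Sections(2);
      Sections[0].push_back(Embedding(std::vector<double>{0.1, 0.2}));
      Sections[1].push_back(Embedding(std::vector<double>{0.3, 0.4}));
      return VocabStorage(std::move(Sections));
    }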
/// Class for storing and accessing the IR2Vec vocabulary.
/// The Vocabulary class manages seed embeddings for LLVM IR entities. The
/// seed embeddings are the initial learned representations of the entities
@@ -162,15 +238,42 @@ using BBEmbeddingsMap = DenseMap<const BasicBlock *, Embedding>;
/// embeddings.
class Vocabulary {
friend class llvm::IR2VecVocabAnalysis;
- using VocabVector = std::vector<ir2vec::Embedding>;
- VocabVector Vocab;
-public:
- // Slot layout:
- // [0 .. MaxOpcodes-1] => Instruction opcodes
- // [MaxOpcodes .. MaxOpcodes+MaxCanonicalTypeIDs-1] => Canonicalized types
- // [MaxOpcodes+MaxCanonicalTypeIDs .. NumCanonicalEntries-1] => Operand kinds
+ // Vocabulary Layout:
+ // +----------------+------------------------------------------------------+
+ // | Entity Type | Index Range |
+ // +----------------+------------------------------------------------------+
+ // | Opcodes | [0 .. (MaxOpcodes-1)] |
+ // | Canonical Types| [MaxOpcodes .. (MaxOpcodes+MaxCanonicalTypeIDs-1)] |
+  // | Operands       | [OperandBaseOffset .. (PredicateBaseOffset-1)]       |
+  // | Predicates     | [PredicateBaseOffset .. (NumCanonicalEntries-1)]     |
+ // +----------------+------------------------------------------------------+
+ // Note: MaxOpcodes is the number of unique opcodes supported by LLVM IR.
+ // MaxCanonicalTypeIDs is the number of canonicalized type IDs.
+ // "Similar" LLVM Types are grouped/canonicalized together. E.g., all
+ // float variants (FloatTy, DoubleTy, HalfTy, etc.) map to
+ // CanonicalTypeID::FloatTy. This helps reduce the vocabulary size
+  //              and improves learning. Comparison predicates (ICmp/FCmp)
+  //              occupy their own section after the operand kinds. This
+  //              layout can be extended with other specializations in the
+  //              future.
+ enum class Section : unsigned {
+ Opcodes = 0,
+ CanonicalTypes = 1,
+ Operands = 2,
+ Predicates = 3,
+ MaxSections
+ };
+
+ // Use section-based storage for better organization and efficiency
+ VocabStorage Storage;
+
+ static constexpr unsigned NumICmpPredicates =
+ static_cast<unsigned>(CmpInst::LAST_ICMP_PREDICATE) -
+ static_cast<unsigned>(CmpInst::FIRST_ICMP_PREDICATE) + 1;
+ static constexpr unsigned NumFCmpPredicates =
+ static_cast<unsigned>(CmpInst::LAST_FCMP_PREDICATE) -
+ static_cast<unsigned>(CmpInst::FIRST_FCMP_PREDICATE) + 1;
+public:
/// Canonical type IDs supported by IR2Vec Vocabulary
enum class CanonicalTypeID : unsigned {
FloatTy,
@@ -207,59 +310,114 @@ public:
static_cast<unsigned>(CanonicalTypeID::MaxCanonicalType);
static constexpr unsigned MaxOperandKinds =
static_cast<unsigned>(OperandKind::MaxOperandKind);
+ // CmpInst::Predicate has gaps. We want the vocabulary to be dense without
+ // empty slots.
+ static constexpr unsigned MaxPredicateKinds =
+ NumICmpPredicates + NumFCmpPredicates;
Vocabulary() = default;
- LLVM_ABI Vocabulary(VocabVector &&Vocab) : Vocab(std::move(Vocab)) {}
+ LLVM_ABI Vocabulary(VocabStorage &&Storage) : Storage(std::move(Storage)) {}
+
+ Vocabulary(const Vocabulary &) = delete;
+ Vocabulary &operator=(const Vocabulary &) = delete;
- LLVM_ABI bool isValid() const { return Vocab.size() == NumCanonicalEntries; };
- LLVM_ABI unsigned getDimension() const;
- /// Total number of entries (opcodes + canonicalized types + operand kinds)
+ Vocabulary(Vocabulary &&) = default;
+ Vocabulary &operator=(Vocabulary &&Other) = delete;
+
+ LLVM_ABI bool isValid() const {
+ return Storage.size() == NumCanonicalEntries;
+ }
+
+ LLVM_ABI unsigned getDimension() const {
+ assert(isValid() && "IR2Vec Vocabulary is invalid");
+ return Storage.getDimension();
+ }
+
+ /// Total number of entries (opcodes + canonicalized types + operand kinds +
+ /// predicates)
static constexpr size_t getCanonicalSize() { return NumCanonicalEntries; }
/// Function to get vocabulary key for a given Opcode
LLVM_ABI static StringRef getVocabKeyForOpcode(unsigned Opcode);
/// Function to get vocabulary key for a given TypeID
- LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID);
+ LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID) {
+ return getVocabKeyForCanonicalTypeID(getCanonicalTypeID(TypeID));
+ }
/// Function to get vocabulary key for a given OperandKind
- LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind);
+ LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind) {
+ unsigned Index = static_cast<unsigned>(Kind);
+ assert(Index < MaxOperandKinds && "Invalid OperandKind");
+ return OperandKindNames[Index];
+ }
/// Function to classify an operand into OperandKind
LLVM_ABI static OperandKind getOperandKind(const Value *Op);
- /// Functions to return the slot index or position of a given Opcode, TypeID,
- /// or OperandKind in the vocabulary.
- LLVM_ABI static unsigned getSlotIndex(unsigned Opcode);
- LLVM_ABI static unsigned getSlotIndex(Type::TypeID TypeID);
- LLVM_ABI static unsigned getSlotIndex(const Value &Op);
+ /// Function to get vocabulary key for a given predicate
+ LLVM_ABI static StringRef getVocabKeyForPredicate(CmpInst::Predicate P);
+
+ /// Functions to return flat index
+ LLVM_ABI static unsigned getIndex(unsigned Opcode) {
+ assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode");
+ return Opcode - 1; // Convert to zero-based index
+ }
+
+ LLVM_ABI static unsigned getIndex(Type::TypeID TypeID) {
+ assert(static_cast<unsigned>(TypeID) < MaxTypeIDs && "Invalid type ID");
+ return MaxOpcodes + static_cast<unsigned>(getCanonicalTypeID(TypeID));
+ }
+
+ LLVM_ABI static unsigned getIndex(const Value &Op) {
+ unsigned Index = static_cast<unsigned>(getOperandKind(&Op));
+ assert(Index < MaxOperandKinds && "Invalid OperandKind");
+ return OperandBaseOffset + Index;
+ }
+
+ LLVM_ABI static unsigned getIndex(CmpInst::Predicate P) {
+ return PredicateBaseOffset + getPredicateLocalIndex(P);
+ }
/// Accessors to get the embedding for a given entity.
- LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const;
- LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeId) const;
- LLVM_ABI const ir2vec::Embedding &operator[](const Value &Arg) const;
+ LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const {
+ assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode");
+ return Storage[static_cast<unsigned>(Section::Opcodes)][Opcode - 1];
+ }
+
+ LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeID) const {
+ assert(static_cast<unsigned>(TypeID) < MaxTypeIDs && "Invalid type ID");
+ unsigned LocalIndex = static_cast<unsigned>(getCanonicalTypeID(TypeID));
+ return Storage[static_cast<unsigned>(Section::CanonicalTypes)][LocalIndex];
+ }
+
+ LLVM_ABI const ir2vec::Embedding &operator[](const Value &Arg) const {
+ unsigned LocalIndex = static_cast<unsigned>(getOperandKind(&Arg));
+ assert(LocalIndex < MaxOperandKinds && "Invalid OperandKind");
+ return Storage[static_cast<unsigned>(Section::Operands)][LocalIndex];
+ }
+
+ LLVM_ABI const ir2vec::Embedding &operator[](CmpInst::Predicate P) const {
+ unsigned LocalIndex = getPredicateLocalIndex(P);
+ return Storage[static_cast<unsigned>(Section::Predicates)][LocalIndex];
+ }
/// Const Iterator type aliases
- using const_iterator = VocabVector::const_iterator;
+ using const_iterator = VocabStorage::const_iterator;
+
const_iterator begin() const {
assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab.begin();
+ return Storage.begin();
}
- const_iterator cbegin() const {
- assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab.cbegin();
- }
+ const_iterator cbegin() const { return begin(); }
const_iterator end() const {
assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab.end();
+ return Storage.end();
}
- const_iterator cend() const {
- assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab.cend();
- }
+ const_iterator cend() const { return end(); }
/// Returns the string key for a given index position in the vocabulary.
/// This is useful for debugging or printing the vocabulary. Do not use this
@@ -267,14 +425,24 @@ public:
LLVM_ABI static StringRef getStringKey(unsigned Pos);
/// Create a dummy vocabulary for testing purposes.
- LLVM_ABI static VocabVector createDummyVocabForTest(unsigned Dim = 1);
+ LLVM_ABI static VocabStorage createDummyVocabForTest(unsigned Dim = 1);
LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
ModuleAnalysisManager::Invalidator &Inv) const;
private:
constexpr static unsigned NumCanonicalEntries =
- MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds;
+ MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds + MaxPredicateKinds;
+
+ // Base offsets for flat index computation
+ constexpr static unsigned OperandBaseOffset =
+ MaxOpcodes + MaxCanonicalTypeIDs;
+ constexpr static unsigned PredicateBaseOffset =
+ OperandBaseOffset + MaxOperandKinds;
+
+ /// Functions for predicate index calculations
+ static unsigned getPredicateLocalIndex(CmpInst::Predicate P);
+ static CmpInst::Predicate getPredicateFromLocalIndex(unsigned LocalIndex);
/// String mappings for CanonicalTypeID values
static constexpr StringLiteral CanonicalTypeNames[] = {
@@ -322,10 +490,26 @@ private:
/// Function to get vocabulary key for canonical type by enum
LLVM_ABI static StringRef
- getVocabKeyForCanonicalTypeID(CanonicalTypeID CType);
+ getVocabKeyForCanonicalTypeID(CanonicalTypeID CType) {
+ unsigned Index = static_cast<unsigned>(CType);
+ assert(Index < MaxCanonicalTypeIDs && "Invalid CanonicalTypeID");
+ return CanonicalTypeNames[Index];
+ }
/// Function to convert TypeID to CanonicalTypeID
- LLVM_ABI static CanonicalTypeID getCanonicalTypeID(Type::TypeID TypeID);
+ LLVM_ABI static CanonicalTypeID getCanonicalTypeID(Type::TypeID TypeID) {
+ unsigned Index = static_cast<unsigned>(TypeID);
+ assert(Index < MaxTypeIDs && "Invalid TypeID");
+ return TypeIDMapping[Index];
+ }
+
+ /// Function to get the predicate enum value for a given index. Index is
+ /// relative to the predicates section of the vocabulary. E.g., Index 0
+ /// corresponds to the first predicate.
+ LLVM_ABI static CmpInst::Predicate getPredicate(unsigned Index) {
+ assert(Index < MaxPredicateKinds && "Invalid predicate index");
+ return getPredicateFromLocalIndex(Index);
+ }
};
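A small sketch of the flat-index layout the accessors above implement; only the relative ordering is observable through the public interface, so the checks below are deliberately loose (the function name is hypothetical):

    #include "llvm/Analysis/IR2Vec.h"
    #include "llvm/IR/InstrTypes.h"

    using llvm::ir2vec::Vocabulary;

    // Opcode slots come first (opcode 1 maps to flat index 0); predicate
    // slots live in the last section, before getCanonicalSize().
    static bool layoutLooksSane() {
      unsigned FirstOp = Vocabulary::getIndex(/*Opcode=*/1u);
      unsigned SomePred = Vocabulary::getIndex(llvm::CmpInst::ICMP_EQ);
      return FirstOp == 0 && SomePred > FirstOp &&
             SomePred < Vocabulary::getCanonicalSize();
    }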
/// Embedder provides the interface to generate embeddings (vector
@@ -418,22 +602,20 @@ public:
/// mapping between an entity of the IR (like opcode, type, argument, etc.) and
/// its corresponding embedding.
class IR2VecVocabAnalysis : public AnalysisInfoMixin<IR2VecVocabAnalysis> {
- using VocabVector = std::vector<ir2vec::Embedding>;
using VocabMap = std::map<std::string, ir2vec::Embedding>;
- VocabMap OpcVocab, TypeVocab, ArgVocab;
- VocabVector Vocab;
+ std::optional<ir2vec::VocabStorage> Vocab;
- Error readVocabulary();
- Error parseVocabSection(StringRef Key, const json::Value &ParsedVocabValue,
- VocabMap &TargetVocab, unsigned &Dim);
- void generateNumMappedVocab();
+ Error readVocabulary(VocabMap &OpcVocab, VocabMap &TypeVocab,
+ VocabMap &ArgVocab);
+ void generateVocabStorage(VocabMap &OpcVocab, VocabMap &TypeVocab,
+ VocabMap &ArgVocab);
void emitError(Error Err, LLVMContext &Ctx);
public:
LLVM_ABI static AnalysisKey Key;
IR2VecVocabAnalysis() = default;
- LLVM_ABI explicit IR2VecVocabAnalysis(const VocabVector &Vocab);
- LLVM_ABI explicit IR2VecVocabAnalysis(VocabVector &&Vocab);
+ LLVM_ABI explicit IR2VecVocabAnalysis(ir2vec::VocabStorage &&Vocab)
+ : Vocab(std::move(Vocab)) {}
using Result = ir2vec::Vocabulary;
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &MAM);
};
diff --git a/llvm/include/llvm/Analysis/MemoryProfileInfo.h b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
index be690a4..571caf9 100644
--- a/llvm/include/llvm/Analysis/MemoryProfileInfo.h
+++ b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
@@ -59,14 +59,6 @@ LLVM_ABI std::string getAllocTypeAttributeString(AllocationType Type);
/// True if the AllocTypes bitmask contains just a single type.
LLVM_ABI bool hasSingleAllocType(uint8_t AllocTypes);
-/// Removes any existing "ambiguous" memprof attribute. Called before we apply a
-/// specific allocation type such as "cold", "notcold", or "hot".
-LLVM_ABI void removeAnyExistingAmbiguousAttribute(CallBase *CB);
-
-/// Adds an "ambiguous" memprof attribute to call with a matched allocation
-/// profile but that we haven't yet been able to disambiguate.
-LLVM_ABI void addAmbiguousAttribute(CallBase *CB);
-
/// Class to build a trie of call stack contexts for a particular profiled
/// allocation call, along with their associated allocation types.
/// The allocation will be at the root of the trie, which is then used to
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index 7a45ae9..164b46b 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -184,6 +184,7 @@ m_scev_PtrToInt(const Op0_t &Op0) {
/// Match a binary SCEV.
template <typename SCEVTy, typename Op0_t, typename Op1_t,
+ SCEV::NoWrapFlags WrapFlags = SCEV::FlagAnyWrap,
bool Commutable = false>
struct SCEVBinaryExpr_match {
Op0_t Op0;
@@ -192,6 +193,10 @@ struct SCEVBinaryExpr_match {
SCEVBinaryExpr_match(Op0_t Op0, Op1_t Op1) : Op0(Op0), Op1(Op1) {}
bool match(const SCEV *S) const {
+    if (auto *WrappingS = dyn_cast<SCEVNAryExpr>(S))
+ if (WrappingS->getNoWrapFlags(WrapFlags) != WrapFlags)
+ return false;
+
auto *E = dyn_cast<SCEVTy>(S);
return E && E->getNumOperands() == 2 &&
((Op0.match(E->getOperand(0)) && Op1.match(E->getOperand(1))) ||
@@ -201,10 +206,12 @@ struct SCEVBinaryExpr_match {
};
template <typename SCEVTy, typename Op0_t, typename Op1_t,
+ SCEV::NoWrapFlags WrapFlags = SCEV::FlagAnyWrap,
bool Commutable = false>
-inline SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, Commutable>
+inline SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, WrapFlags, Commutable>
m_scev_Binary(const Op0_t &Op0, const Op1_t &Op1) {
- return SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, Commutable>(Op0, Op1);
+ return SCEVBinaryExpr_match<SCEVTy, Op0_t, Op1_t, WrapFlags, Commutable>(Op0,
+ Op1);
}
template <typename Op0_t, typename Op1_t>
@@ -220,9 +227,17 @@ m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1) {
}
template <typename Op0_t, typename Op1_t>
-inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, true>
+inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true>
m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
- return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, true>(Op0, Op1);
+ return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true>(Op0,
+ Op1);
+}
+
+template <typename Op0_t, typename Op1_t>
+inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true>
+m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_scev_Binary<SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true>(Op0,
+ Op1);
}
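A hypothetical caller of the new flag-aware matcher; unlike ``m_scev_c_Mul``, it only fires when the multiply is marked no-unsigned-wrap:

    #include "llvm/Analysis/ScalarEvolutionPatternMatch.h"

    using namespace llvm;
    using namespace llvm::SCEVPatternMatch;

    // Matches S = C * X commutatively, but only for an NUW SCEVMulExpr,
    // binding the constant and the other operand on success.
    static bool matchNUWMulByConst(const SCEV *S, const SCEVConstant *&C,
                                   const SCEV *&X) {
      return match(S, m_scev_c_NUWMul(m_SCEVConstant(C), m_SCEV(X)));
    }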
template <typename Op0_t, typename Op1_t>
diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h
index 08a7ddb..8944e736 100644
--- a/llvm/include/llvm/BinaryFormat/DXContainer.h
+++ b/llvm/include/llvm/BinaryFormat/DXContainer.h
@@ -844,6 +844,7 @@ struct StaticSampler : public v1::StaticSampler {
enum class RootSignatureVersion {
V1_0 = 0x1,
V1_1 = 0x2,
+ V1_2 = 0x3,
};
} // namespace dxbc
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 22569aa..c0e426c 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -300,6 +300,10 @@ private:
Type *OpType,
LostDebugLocObserver &LocObserver);
+ LegalizeResult emitModfLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
+ unsigned Size, Type *OpType,
+ LostDebugLocObserver &LocObserver);
+
public:
/// Return the alignment to use for a stack temporary object with the given
/// type.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 0b6033b..40c7792 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -2184,6 +2184,13 @@ public:
return buildInstr(TargetOpcode::G_FSINCOS, {Sin, Cos}, {Src}, Flags);
}
+ /// Build and insert \p Fract, \p Int = G_FMODF \p Src
+ MachineInstrBuilder buildModf(const DstOp &Fract, const DstOp &Int,
+ const SrcOp &Src,
+ std::optional<unsigned> Flags = std::nullopt) {
+ return buildInstr(TargetOpcode::G_FMODF, {Fract, Int}, {Src}, Flags);
+ }
+
/// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1
MachineInstrBuilder buildFCopysign(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1) {
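A hypothetical lowering snippet using the new builder; the f32 destination types are invented for illustration:

    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
    #include <utility>

    using namespace llvm;

    // Emit Fract, Int = G_FMODF Src. Mirroring libm's modf(), result 0 is
    // the fractional part and result 1 the integral part.
    static std::pair<Register, Register> emitModf(MachineIRBuilder &B,
                                                  Register Src) {
      LLT F32 = LLT::scalar(32);
      auto MIB = B.buildModf(F32, F32, Src);
      return {MIB.getReg(0), MIB.getReg(1)};
    }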
diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
index c7304e3..e80c138 100644
--- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -378,6 +378,8 @@ struct ScalarEnumerationTraits<TargetStackID::Value> {
IO.enumCase(ID, "default", TargetStackID::Default);
IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
+ IO.enumCase(ID, "scalable-predicate-vector",
+ TargetStackID::ScalablePredicateVector);
IO.enumCase(ID, "wasm-local", TargetStackID::WasmLocal);
IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
}
diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index 00c7343..50ce931 100644
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -497,7 +497,18 @@ public:
/// Should this stack ID be considered in MaxAlignment.
bool contributesToMaxAlignment(uint8_t StackID) {
return StackID == TargetStackID::Default ||
- StackID == TargetStackID::ScalableVector;
+ StackID == TargetStackID::ScalableVector ||
+ StackID == TargetStackID::ScalablePredicateVector;
+ }
+
+ bool hasScalableStackID(int ObjectIdx) const {
+ uint8_t StackID = getStackID(ObjectIdx);
+ return isScalableStackID(StackID);
+ }
+
+ bool isScalableStackID(uint8_t StackID) const {
+ return StackID == TargetStackID::ScalableVector ||
+ StackID == TargetStackID::ScalablePredicateVector;
}
/// setObjectAlignment - Change the alignment of the specified stack object.
diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index 0e29e45..75696faf 100644
--- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -32,6 +32,7 @@ enum Value {
SGPRSpill = 1,
ScalableVector = 2,
WasmLocal = 3,
+ ScalablePredicateVector = 4,
NoAlloc = 255
};
}
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index c45e03a..7bbad17 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -480,13 +480,6 @@ public:
return true;
}
- /// Return true if the @llvm.vector.partial.reduce.* intrinsic
- /// should be expanded using generic code in SelectionDAGBuilder.
- virtual bool
- shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const {
- return true;
- }
-
/// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded
/// using generic code in SelectionDAGBuilder.
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
diff --git a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h
index 52af205..ffe0b50 100644
--- a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h
+++ b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h
@@ -179,6 +179,7 @@ public:
class DWARFDataExtractorSimple
: public DWARFDataExtractorBase<DWARFDataExtractorSimple> {
+public:
using DWARFDataExtractorBase::DWARFDataExtractorBase;
LLVM_ABI uint64_t getRelocatedValueImpl(uint32_t Size, uint64_t *Off,
diff --git a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
index 87777fd..edee6a7 100644
--- a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
+++ b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h
@@ -56,7 +56,8 @@ struct RootDescriptor {
return;
}
- assert(Version == llvm::dxbc::RootSignatureVersion::V1_1 &&
+ assert((Version == llvm::dxbc::RootSignatureVersion::V1_1 ||
+ Version == llvm::dxbc::RootSignatureVersion::V1_2) &&
"Specified an invalid root signature version");
switch (Type) {
case dxil::ResourceClass::CBuffer:
@@ -100,7 +101,8 @@ struct DescriptorTableClause {
return;
}
- assert(Version == dxbc::RootSignatureVersion::V1_1 &&
+ assert((Version == dxbc::RootSignatureVersion::V1_1 ||
+ Version == dxbc::RootSignatureVersion::V1_2) &&
"Specified an invalid root signature version");
switch (Type) {
case dxil::ResourceClass::CBuffer:
@@ -131,6 +133,7 @@ struct StaticSampler {
float MaxLOD = std::numeric_limits<float>::max();
uint32_t Space = 0;
dxbc::ShaderVisibility Visibility = dxbc::ShaderVisibility::All;
+ dxbc::StaticSamplerFlags Flags = dxbc::StaticSamplerFlags::None;
};
/// Models RootElement : RootFlags | RootConstants | RootParam
diff --git a/llvm/include/llvm/IR/FixedMetadataKinds.def b/llvm/include/llvm/IR/FixedMetadataKinds.def
index d09cc15..0603abc 100644
--- a/llvm/include/llvm/IR/FixedMetadataKinds.def
+++ b/llvm/include/llvm/IR/FixedMetadataKinds.def
@@ -55,3 +55,4 @@ LLVM_FIXED_MD_KIND(MD_mmra, "mmra", 40)
LLVM_FIXED_MD_KIND(MD_noalias_addrspace, "noalias.addrspace", 41)
LLVM_FIXED_MD_KIND(MD_callee_type, "callee_type", 42)
LLVM_FIXED_MD_KIND(MD_nofree, "nofree", 43)
+LLVM_FIXED_MD_KIND(MD_captures, "captures", 44)
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 95a0a7f..de7a237 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -32,6 +32,7 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
@@ -3536,8 +3537,6 @@ class SwitchInstProfUpdateWrapper {
bool Changed = false;
protected:
- LLVM_ABI MDNode *buildProfBranchWeightsMD();
-
LLVM_ABI void init();
public:
@@ -3549,8 +3548,8 @@ public:
SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
~SwitchInstProfUpdateWrapper() {
- if (Changed)
- SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
+ if (Changed && Weights.has_value() && Weights->size() >= 2)
+ setBranchWeights(SI, Weights.value(), /*IsExpected=*/false);
}
/// Delegate the call to the underlying SwitchInst::removeCase() and remove
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index eb0440f..0622bfa 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -810,6 +810,26 @@ public:
/// Whether the intrinsic is signed or unsigned.
bool isSigned() const { return isSigned(getIntrinsicID()); };
+ /// Whether the intrinsic is a smin or umin.
+ static bool isMin(Intrinsic::ID ID) {
+ switch (ID) {
+ case Intrinsic::umin:
+ case Intrinsic::smin:
+ return true;
+ case Intrinsic::umax:
+ case Intrinsic::smax:
+ return false;
+ default:
+ llvm_unreachable("Invalid intrinsic");
+ }
+ }
+
+ /// Whether the intrinsic is a smin or a umin.
+ bool isMin() const { return isMin(getIntrinsicID()); }
+
+ /// Whether the intrinsic is a smax or a umax.
+ bool isMax() const { return !isMin(getIntrinsicID()); }
+
/// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
/// so there is a certain threshold value, upon reaching which,
/// their value can no longer change. Return said threshold.
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 7c9aef5..fbc92d7 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -130,8 +130,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
class AdvSIMD_1VectorArg_Expand_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Long_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
class AdvSIMD_1IntArg_Narrow_Intrinsic
: DefaultAttrsIntrinsic<[llvm_any_ty], [llvm_any_ty], [IntrNoMem]>;
class AdvSIMD_1VectorArg_Narrow_Intrinsic
@@ -150,9 +148,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_2VectorArg_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Compare_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
- [IntrNoMem]>;
class AdvSIMD_2Arg_FloatCompare_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
[IntrNoMem]>;
@@ -160,10 +155,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>, LLVMTruncatedType<0>],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Wide_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMTruncatedType<0>],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Narrow_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMExtendedType<0>, LLVMExtendedType<0>],
@@ -172,10 +163,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyint_ty],
[LLVMExtendedType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>],
@@ -184,10 +171,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Lane_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty],
@@ -205,14 +188,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty,
- LLVMMatchType<1>], [IntrNoMem]>;
- class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty, llvm_i32_ty],
- [IntrNoMem]>;
class AdvSIMD_CvtFxToFP_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
[IntrNoMem]>;
@@ -238,11 +213,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
[IntrNoMem]>;
- class AdvSIMD_FML_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
- [IntrNoMem]>;
-
class AdvSIMD_BF16FML_Intrinsic
: DefaultAttrsIntrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
index 990bdc6..85a7f8f 100644
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -41,6 +41,7 @@
namespace llvm {
+enum class CaptureComponents : uint8_t;
class Module;
class ModuleSlotTracker;
class raw_ostream;
@@ -1480,6 +1481,13 @@ public:
LLVM_ABI static MDNode *getMergedCallsiteMetadata(MDNode *A, MDNode *B);
LLVM_ABI static MDNode *getMergedCalleeTypeMetadata(const MDNode *A,
const MDNode *B);
+
+ /// Convert !captures metadata to CaptureComponents. MD may be nullptr.
+ LLVM_ABI static CaptureComponents toCaptureComponents(const MDNode *MD);
+ /// Convert CaptureComponents to !captures metadata. The return value may be
+ /// nullptr.
+ LLVM_ABI static MDNode *fromCaptureComponents(LLVMContext &Ctx,
+ CaptureComponents CC);
};
/// Tuple of metadata.
diff --git a/llvm/include/llvm/IR/ProfDataUtils.h b/llvm/include/llvm/IR/ProfDataUtils.h
index e97160e..a0876b1 100644
--- a/llvm/include/llvm/IR/ProfDataUtils.h
+++ b/llvm/include/llvm/IR/ProfDataUtils.h
@@ -145,7 +145,13 @@ LLVM_ABI bool extractProfTotalWeight(const Instruction &I,
/// \param Weights an array of weights to set on instruction I.
/// \param IsExpected were these weights added from an llvm.expect* intrinsic.
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights,
- bool IsExpected);
+ bool IsExpected, bool ElideAllZero = false);
+
+/// Variant of `setBranchWeights` where the `Weights` will be fit first to
+/// uint32_t by shifting right.
+LLVM_ABI void setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights,
+ bool IsExpected,
+ bool ElideAllZero = false);
/// downscale the given weights preserving the ratio. If the maximum value is
/// not already known and not provided via \param KnownMaxCount , it will be
diff --git a/llvm/include/llvm/IR/ValueMap.h b/llvm/include/llvm/IR/ValueMap.h
index 1a11718..97653c2 100644
--- a/llvm/include/llvm/IR/ValueMap.h
+++ b/llvm/include/llvm/IR/ValueMap.h
@@ -42,18 +42,15 @@
namespace llvm {
-template<typename KeyT, typename ValueT, typename Config>
+template <typename KeyT, typename ValueT, typename Config>
class ValueMapCallbackVH;
-template<typename DenseMapT, typename KeyT>
-class ValueMapIterator;
-template<typename DenseMapT, typename KeyT>
-class ValueMapConstIterator;
+template <typename DenseMapT, typename KeyT> class ValueMapIterator;
+template <typename DenseMapT, typename KeyT> class ValueMapConstIterator;
/// This class defines the default behavior for configurable aspects of
/// ValueMap<>. User Configs should inherit from this class to be as compatible
/// as possible with future versions of ValueMap.
-template<typename KeyT, typename MutexT = sys::Mutex>
-struct ValueMapConfig {
+template <typename KeyT, typename MutexT = sys::Mutex> struct ValueMapConfig {
using mutex_type = MutexT;
/// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's
@@ -66,21 +63,24 @@ struct ValueMapConfig {
// override all the defaults.
struct ExtraData {};
- template<typename ExtraDataT>
+ template <typename ExtraDataT>
static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
- template<typename ExtraDataT>
- static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
+ template <typename ExtraDataT>
+ static void onDelete(const ExtraDataT & /*Data*/, KeyT /*Old*/) {}
/// Returns a mutex that should be acquired around any changes to the map.
/// This is only acquired from the CallbackVH (and held around calls to onRAUW
/// and onDelete) and not inside other ValueMap methods. NULL means that no
/// mutex is necessary.
- template<typename ExtraDataT>
- static mutex_type *getMutex(const ExtraDataT &/*Data*/) { return nullptr; }
+ template <typename ExtraDataT>
+ static mutex_type *getMutex(const ExtraDataT & /*Data*/) {
+ return nullptr;
+ }
};
/// See the file comment.
-template<typename KeyT, typename ValueT, typename Config =ValueMapConfig<KeyT>>
+template <typename KeyT, typename ValueT,
+ typename Config = ValueMapConfig<KeyT>>
class ValueMap {
friend class ValueMapCallbackVH<KeyT, ValueT, Config>;
@@ -157,9 +157,7 @@ public:
return Map.find_as(Val) == Map.end() ? 0 : 1;
}
- iterator find(const KeyT &Val) {
- return iterator(Map.find_as(Val));
- }
+ iterator find(const KeyT &Val) { return iterator(Map.find_as(Val)); }
const_iterator find(const KeyT &Val) const {
return const_iterator(Map.find_as(Val));
}
@@ -186,8 +184,7 @@ public:
}
/// insert - Range insertion of pairs.
- template<typename InputIt>
- void insert(InputIt I, InputIt E) {
+ template <typename InputIt> void insert(InputIt I, InputIt E) {
for (; I != E; ++I)
insert(*I);
}
@@ -200,17 +197,13 @@ public:
Map.erase(I);
return true;
}
- void erase(iterator I) {
- return Map.erase(I.base());
- }
+ void erase(iterator I) { return Map.erase(I.base()); }
- value_type& FindAndConstruct(const KeyT &Key) {
+ value_type &FindAndConstruct(const KeyT &Key) {
return Map.FindAndConstruct(Wrap(Key));
}
- ValueT &operator[](const KeyT &Key) {
- return Map[Wrap(Key)];
- }
+ ValueT &operator[](const KeyT &Key) { return Map[Wrap(Key)]; }
/// isPointerIntoBucketsArray - Return true if the specified pointer points
/// somewhere into the ValueMap's array of buckets (i.e. either to a key or
@@ -235,7 +228,7 @@ private:
// the const_cast incorrect) is if it gets inserted into the map. But then
// this function must have been called from a non-const method, making the
// const_cast ok.
- return ValueMapCVH(key, const_cast<ValueMap*>(this));
+ return ValueMapCVH(key, const_cast<ValueMap *>(this));
}
};
@@ -252,7 +245,7 @@ class ValueMapCallbackVH final : public CallbackVH {
ValueMapT *Map;
ValueMapCallbackVH(KeyT Key, ValueMapT *Map)
- : CallbackVH(const_cast<Value*>(static_cast<const Value*>(Key))),
+ : CallbackVH(const_cast<Value *>(static_cast<const Value *>(Key))),
Map(Map) {}
// Private constructor used to create empty/tombstone DenseMap keys.
@@ -268,8 +261,8 @@ public:
std::unique_lock<typename Config::mutex_type> Guard;
if (M)
Guard = std::unique_lock<typename Config::mutex_type>(*M);
- Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this.
- Copy.Map->Map.erase(Copy); // Definitely destroys *this.
+ Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this.
+ Copy.Map->Map.erase(Copy); // Definitely destroys *this.
}
void allUsesReplacedWith(Value *new_key) override {
@@ -291,14 +284,14 @@ public:
// removed the old mapping.
if (I != Copy.Map->Map.end()) {
ValueT Target(std::move(I->second));
- Copy.Map->Map.erase(I); // Definitely destroys *this.
+ Copy.Map->Map.erase(I); // Definitely destroys *this.
Copy.Map->insert(std::make_pair(typed_new_key, std::move(Target)));
}
}
}
};
-template<typename KeyT, typename ValueT, typename Config>
+template <typename KeyT, typename ValueT, typename Config>
struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> {
using VH = ValueMapCallbackVH<KeyT, ValueT, Config>;
@@ -318,9 +311,7 @@ struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> {
return DenseMapInfo<KeyT>::getHashValue(Val);
}
- static bool isEqual(const VH &LHS, const VH &RHS) {
- return LHS == RHS;
- }
+ static bool isEqual(const VH &LHS, const VH &RHS) { return LHS == RHS; }
static bool isEqual(const KeyT &LHS, const VH &RHS) {
return LHS == RHS.getValPtr();
@@ -347,7 +338,7 @@ public:
struct ValueTypeProxy {
const KeyT first;
- ValueT& second;
+ ValueT &second;
ValueTypeProxy *operator->() { return this; }
@@ -361,23 +352,19 @@ public:
return Result;
}
- ValueTypeProxy operator->() const {
- return operator*();
- }
+ ValueTypeProxy operator->() const { return operator*(); }
- bool operator==(const ValueMapIterator &RHS) const {
- return I == RHS.I;
- }
- bool operator!=(const ValueMapIterator &RHS) const {
- return I != RHS.I;
- }
+ bool operator==(const ValueMapIterator &RHS) const { return I == RHS.I; }
+ bool operator!=(const ValueMapIterator &RHS) const { return I != RHS.I; }
- inline ValueMapIterator& operator++() { // Preincrement
+ inline ValueMapIterator &operator++() { // Preincrement
++I;
return *this;
}
- ValueMapIterator operator++(int) { // Postincrement
- ValueMapIterator tmp = *this; ++*this; return tmp;
+ ValueMapIterator operator++(int) { // Postincrement
+ ValueMapIterator tmp = *this;
+ ++*this;
+ return tmp;
}
};
@@ -397,13 +384,13 @@ public:
ValueMapConstIterator() : I() {}
ValueMapConstIterator(BaseT I) : I(I) {}
ValueMapConstIterator(ValueMapIterator<DenseMapT, KeyT> Other)
- : I(Other.base()) {}
+ : I(Other.base()) {}
BaseT base() const { return I; }
struct ValueTypeProxy {
const KeyT first;
- const ValueT& second;
+ const ValueT &second;
ValueTypeProxy *operator->() { return this; }
operator std::pair<KeyT, ValueT>() const {
return std::make_pair(first, second);
@@ -415,23 +402,19 @@ public:
return Result;
}
- ValueTypeProxy operator->() const {
- return operator*();
- }
+ ValueTypeProxy operator->() const { return operator*(); }
- bool operator==(const ValueMapConstIterator &RHS) const {
- return I == RHS.I;
- }
- bool operator!=(const ValueMapConstIterator &RHS) const {
- return I != RHS.I;
- }
+ bool operator==(const ValueMapConstIterator &RHS) const { return I == RHS.I; }
+ bool operator!=(const ValueMapConstIterator &RHS) const { return I != RHS.I; }
- inline ValueMapConstIterator& operator++() { // Preincrement
+ inline ValueMapConstIterator &operator++() { // Preincrement
++I;
return *this;
}
- ValueMapConstIterator operator++(int) { // Postincrement
- ValueMapConstIterator tmp = *this; ++*this; return tmp;
+ ValueMapConstIterator operator++(int) { // Postincrement
+ ValueMapConstIterator tmp = *this;
+ ++*this;
+ return tmp;
}
};
diff --git a/llvm/include/llvm/Object/ELF.h b/llvm/include/llvm/Object/ELF.h
index 0b362d3..59f63eb 100644
--- a/llvm/include/llvm/Object/ELF.h
+++ b/llvm/include/llvm/Object/ELF.h
@@ -407,7 +407,8 @@ public:
Elf_Note_Iterator notes_begin(const Elf_Phdr &Phdr, Error &Err) const {
assert(Phdr.p_type == ELF::PT_NOTE && "Phdr is not of type PT_NOTE");
ErrorAsOutParameter ErrAsOutParam(Err);
- if (Phdr.p_offset + Phdr.p_filesz > getBufSize()) {
+ if (Phdr.p_offset + Phdr.p_filesz > getBufSize() ||
+ Phdr.p_offset + Phdr.p_filesz < Phdr.p_offset) {
Err =
createError("invalid offset (0x" + Twine::utohexstr(Phdr.p_offset) +
") or size (0x" + Twine::utohexstr(Phdr.p_filesz) + ")");
@@ -435,7 +436,8 @@ public:
Elf_Note_Iterator notes_begin(const Elf_Shdr &Shdr, Error &Err) const {
assert(Shdr.sh_type == ELF::SHT_NOTE && "Shdr is not of type SHT_NOTE");
ErrorAsOutParameter ErrAsOutParam(Err);
- if (Shdr.sh_offset + Shdr.sh_size > getBufSize()) {
+ if (Shdr.sh_offset + Shdr.sh_size > getBufSize() ||
+ Shdr.sh_offset + Shdr.sh_size < Shdr.sh_offset) {
Err =
createError("invalid offset (0x" + Twine::utohexstr(Shdr.sh_offset) +
") or size (0x" + Twine::utohexstr(Shdr.sh_size) + ")");
diff --git a/llvm/include/llvm/Support/FileSystem.h b/llvm/include/llvm/Support/FileSystem.h
index c203779..cf2a810 100644
--- a/llvm/include/llvm/Support/FileSystem.h
+++ b/llvm/include/llvm/Support/FileSystem.h
@@ -268,18 +268,6 @@ public:
/// Make \a path an absolute path.
///
-/// Makes \a path absolute using the \a current_directory if it is not already.
-/// An empty \a path will result in the \a current_directory.
-///
-/// /absolute/path => /absolute/path
-/// relative/../path => <current-directory>/relative/../path
-///
-/// @param path A path that is modified to be an absolute path.
-LLVM_ABI void make_absolute(const Twine &current_directory,
- SmallVectorImpl<char> &path);
-
-/// Make \a path an absolute path.
-///
/// Makes \a path absolute using the current directory if it is not already. An
/// empty \a path will result in the current directory.
///
diff --git a/llvm/include/llvm/Support/Format.h b/llvm/include/llvm/Support/Format.h
index 2553002..34b224d 100644
--- a/llvm/include/llvm/Support/Format.h
+++ b/llvm/include/llvm/Support/Format.h
@@ -78,16 +78,6 @@ public:
/// printed, this synthesizes the string into a temporary buffer provided and
/// returns whether or not it is big enough.
-// Helper to validate that format() parameters are scalars or pointers.
-template <typename... Args> struct validate_format_parameters;
-template <typename Arg, typename... Args>
-struct validate_format_parameters<Arg, Args...> {
- static_assert(std::is_scalar_v<Arg>,
- "format can't be used with non fundamental / non pointer type");
- validate_format_parameters() { validate_format_parameters<Args...>(); }
-};
-template <> struct validate_format_parameters<> {};
-
template <typename... Ts>
class format_object final : public format_object_base {
std::tuple<Ts...> Vals;
@@ -105,7 +95,9 @@ class format_object final : public format_object_base {
public:
format_object(const char *fmt, const Ts &... vals)
: format_object_base(fmt), Vals(vals...) {
- validate_format_parameters<Ts...>();
+ static_assert(
+ (std::is_scalar_v<Ts> && ...),
+ "format can't be used with non fundamental / non pointer type");
}
int snprint(char *Buffer, unsigned BufferSize) const override {
diff --git a/llvm/include/llvm/Support/FormatProviders.h b/llvm/include/llvm/Support/FormatProviders.h
index 9147782..8eaa5e38 100644
--- a/llvm/include/llvm/Support/FormatProviders.h
+++ b/llvm/include/llvm/Support/FormatProviders.h
@@ -29,22 +29,18 @@ namespace support {
namespace detail {
template <typename T>
struct use_integral_formatter
- : public std::bool_constant<
- is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t,
- uint64_t, int, unsigned, long, unsigned long, long long,
- unsigned long long>::value> {};
+ : public is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
+ int64_t, uint64_t, int, unsigned, long, unsigned long,
+ long long, unsigned long long> {};
template <typename T>
-struct use_char_formatter : public std::bool_constant<std::is_same_v<T, char>> {
-};
+struct use_char_formatter : public std::is_same<T, char> {};
template <typename T>
-struct is_cstring
- : public std::bool_constant<is_one_of<T, char *, const char *>::value> {};
+struct is_cstring : public is_one_of<T, char *, const char *> {};
template <typename T>
-struct use_string_formatter
- : public std::bool_constant<std::is_convertible_v<T, llvm::StringRef>> {};
+struct use_string_formatter : public std::is_convertible<T, llvm::StringRef> {};
template <typename T>
struct use_pointer_formatter
@@ -52,8 +48,7 @@ struct use_pointer_formatter
};
template <typename T>
-struct use_double_formatter
- : public std::bool_constant<std::is_floating_point_v<T>> {};
+struct use_double_formatter : public std::is_floating_point<T> {};
class HelperFunctions {
protected:
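
The FormatProviders.h traits above now derive directly from the corresponding standard trait (or from `is_one_of`) instead of re-wrapping its `_v` value in `std::bool_constant`; both forms expose the same `::value`. A compile-time sketch of the equivalence, assuming only the standard library:

```cpp
#include <type_traits>

// Wrapper style (removed): re-packages the trait's value.
template <typename T>
struct use_char_formatter_old : std::bool_constant<std::is_same_v<T, char>> {};

// Direct inheritance (new): the standard trait already derives from
// std::bool_constant, so ::value and ::type come for free.
template <typename T> struct use_char_formatter_new : std::is_same<T, char> {};

static_assert(use_char_formatter_old<char>::value ==
              use_char_formatter_new<char>::value);
static_assert(!use_char_formatter_new<int>::value);

int main() {}
```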
diff --git a/llvm/include/llvm/Support/FormatVariadicDetails.h b/llvm/include/llvm/Support/FormatVariadicDetails.h
index 4002caf..0fdc7b6 100644
--- a/llvm/include/llvm/Support/FormatVariadicDetails.h
+++ b/llvm/include/llvm/Support/FormatVariadicDetails.h
@@ -92,8 +92,7 @@ public:
// based format() invocation.
template <typename T>
struct uses_format_member
- : public std::bool_constant<
- std::is_base_of_v<format_adapter, std::remove_reference_t<T>>> {};
+ : public std::is_base_of<format_adapter, std::remove_reference_t<T>> {};
// Simple template that decides whether a type T should use the format_provider
// based format() invocation. The member function takes priority, so this test
diff --git a/llvm/include/llvm/Support/HashBuilder.h b/llvm/include/llvm/Support/HashBuilder.h
index ae266d3..d0130d6 100644
--- a/llvm/include/llvm/Support/HashBuilder.h
+++ b/llvm/include/llvm/Support/HashBuilder.h
@@ -31,8 +31,7 @@ namespace llvm {
namespace hashbuilder_detail {
/// Trait to indicate whether a type's bits can be hashed directly (after
/// endianness correction).
-template <typename U>
-struct IsHashableData : std::bool_constant<is_integral_or_enum<U>::value> {};
+template <typename U> struct IsHashableData : is_integral_or_enum<U> {};
} // namespace hashbuilder_detail
diff --git a/llvm/include/llvm/Support/InstructionCost.h b/llvm/include/llvm/Support/InstructionCost.h
index ab1c8eb..507c166 100644
--- a/llvm/include/llvm/Support/InstructionCost.h
+++ b/llvm/include/llvm/Support/InstructionCost.h
@@ -59,8 +59,8 @@ private:
State = Invalid;
}
- static CostType getMaxValue() { return std::numeric_limits<CostType>::max(); }
- static CostType getMinValue() { return std::numeric_limits<CostType>::min(); }
+ static constexpr CostType MaxValue = std::numeric_limits<CostType>::max();
+ static constexpr CostType MinValue = std::numeric_limits<CostType>::min();
public:
// A default constructed InstructionCost is a valid zero cost
@@ -69,8 +69,8 @@ public:
InstructionCost(CostState) = delete;
InstructionCost(CostType Val) : Value(Val), State(Valid) {}
- static InstructionCost getMax() { return getMaxValue(); }
- static InstructionCost getMin() { return getMinValue(); }
+ static InstructionCost getMax() { return MaxValue; }
+ static InstructionCost getMin() { return MinValue; }
static InstructionCost getInvalid(CostType Val = 0) {
InstructionCost Tmp(Val);
Tmp.setInvalid();
@@ -102,7 +102,7 @@ public:
// Saturating addition.
InstructionCost::CostType Result;
if (AddOverflow(Value, RHS.Value, Result))
- Result = RHS.Value > 0 ? getMaxValue() : getMinValue();
+ Result = RHS.Value > 0 ? MaxValue : MinValue;
Value = Result;
return *this;
@@ -120,7 +120,7 @@ public:
// Saturating subtract.
InstructionCost::CostType Result;
if (SubOverflow(Value, RHS.Value, Result))
- Result = RHS.Value > 0 ? getMinValue() : getMaxValue();
+ Result = RHS.Value > 0 ? MinValue : MaxValue;
Value = Result;
return *this;
}
@@ -138,9 +138,9 @@ public:
InstructionCost::CostType Result;
if (MulOverflow(Value, RHS.Value, Result)) {
if ((Value > 0 && RHS.Value > 0) || (Value < 0 && RHS.Value < 0))
- Result = getMaxValue();
+ Result = MaxValue;
else
- Result = getMinValue();
+ Result = MinValue;
}
Value = Result;
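
In the InstructionCost.h change, the bounds become `constexpr` data members rather than static getter functions, and the saturating operators clamp to them on overflow. A minimal sketch of the same saturating-add pattern, using the Clang/GCC `__builtin_add_overflow` builtin in place of LLVM's `AddOverflow` helper:

```cpp
#include <cstdint>
#include <limits>

// Minimal sketch of InstructionCost's saturating addition, with constexpr
// bounds instead of getMaxValue()/getMinValue() helper functions.
struct Cost {
  using CostType = int64_t;
  static constexpr CostType MaxValue = std::numeric_limits<CostType>::max();
  static constexpr CostType MinValue = std::numeric_limits<CostType>::min();
  CostType Value = 0;

  Cost &operator+=(const Cost &RHS) {
    CostType Result;
    if (__builtin_add_overflow(Value, RHS.Value, &Result))
      // Saturate toward the direction of the overflow.
      Result = RHS.Value > 0 ? MaxValue : MinValue;
    Value = Result;
    return *this;
  }
};

int main() {
  Cost C{Cost::MaxValue};
  C += Cost{1}; // saturates at MaxValue instead of wrapping
  return C.Value == Cost::MaxValue ? 0 : 1;
}
```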
diff --git a/llvm/include/llvm/Support/Path.h b/llvm/include/llvm/Support/Path.h
index 0cb5171..a8e0f33 100644
--- a/llvm/include/llvm/Support/Path.h
+++ b/llvm/include/llvm/Support/Path.h
@@ -566,6 +566,18 @@ LLVM_ABI bool is_absolute_gnu(const Twine &path, Style style = Style::native);
/// @result True if the path is relative, false if it is not.
LLVM_ABI bool is_relative(const Twine &path, Style style = Style::native);
+/// Make \a path an absolute path.
+///
+/// Makes \a path absolute using the \a current_directory if it is not already.
+/// An empty \a path will result in the \a current_directory.
+///
+/// /absolute/path => /absolute/path
+/// relative/../path => <current-directory>/relative/../path
+///
+/// @param path A path that is modified to be an absolute path.
+LLVM_ABI void make_absolute(const Twine &current_directory,
+ SmallVectorImpl<char> &path);
+
} // end namespace path
} // end namespace sys
} // end namespace llvm
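
With this move, the `current_directory` overload of `make_absolute` sits next to the other purely lexical helpers in `llvm::sys::path` rather than in FileSystem.h. A usage sketch within the LLVM tree (the paths are illustrative):

```cpp
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"

int main() {
  llvm::SmallString<128> P("relative/../path");
  // Lexical operation only: no filesystem access is performed, which is
  // why the declaration now lives in llvm::sys::path, not llvm::sys::fs.
  llvm::sys::path::make_absolute("/work/dir", P);
  // P == "/work/dir/relative/../path"
  return 0;
}
```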
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 7710e2f..e5531456 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -650,6 +650,9 @@ HANDLE_TARGET_OPCODE(G_FDIV)
/// Generic FP remainder.
HANDLE_TARGET_OPCODE(G_FREM)
+/// Generic FP modf
+HANDLE_TARGET_OPCODE(G_FMODF)
+
/// Generic FP exponentiation.
HANDLE_TARGET_OPCODE(G_FPOW)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 733d10b..faf7788 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -981,6 +981,13 @@ def G_FREM : GenericInstruction {
let hasSideEffects = false;
}
+/// Generic FP modf
+def G_FMODF : GenericInstruction {
+ let OutOperandList = (outs type0:$dst1, type0:$dst2);
+ let InOperandList = (ins type0:$src1);
+ let hasSideEffects = false;
+}
+
// Floating point exponentiation.
def G_FPOW : GenericInstruction {
let OutOperandList = (outs type0:$dst);
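
The new G_FMODF opcode models the C library `modf`, which splits a floating-point value into its fractional and integral parts; unlike the C interface, the generic instruction yields both parts as results ($dst1 = fractional, $dst2 = integral, matching the libcall lowering later in this patch). A sketch of the semantics being modeled:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  double Integral;
  double Fractional = std::modf(3.75, &Integral);
  // G_FMODF exposes both results as instruction outputs instead of the
  // C out-parameter convention used by std::modf.
  std::printf("frac=%f int=%f\n", Fractional, Integral); // 0.75 and 3.0
  return 0;
}
```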
diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h
index bf4e490..d0fd483 100644
--- a/llvm/include/llvm/Target/TargetMachine.h
+++ b/llvm/include/llvm/Target/TargetMachine.h
@@ -29,10 +29,10 @@
#include <string>
#include <utility>
-LLVM_ABI extern llvm::cl::opt<bool> NoKernelInfoEndLTO;
-
namespace llvm {
+LLVM_ABI extern llvm::cl::opt<bool> NoKernelInfoEndLTO;
+
class AAManager;
using ModulePassManager = PassManager<Module>;
diff --git a/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h b/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h
index 6178622..dfd6e2f 100644
--- a/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h
+++ b/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h
@@ -15,7 +15,12 @@ namespace llvm {
class Function;
-struct JumpTableToSwitchPass : PassInfoMixin<JumpTableToSwitchPass> {
+class JumpTableToSwitchPass : public PassInfoMixin<JumpTableToSwitchPass> {
+ // Necessary until we switch to GUIDs as metadata, after which we can drop it.
+ const bool InLTO;
+
+public:
+ explicit JumpTableToSwitchPass(bool InLTO = false) : InLTO(InLTO) {}
/// Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
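
JumpTableToSwitchPass changes from a stateless struct to a class that captures an `InLTO` flag at construction, the usual way `PassInfoMixin` passes record a pipeline-level option for later use in `run()`. A sketch of the pattern with a hypothetical pass name:

```cpp
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
// Hypothetical pass illustrating the construction-time option pattern:
// the flag is fixed when the pass pipeline is built.
class MyExamplePass : public PassInfoMixin<MyExamplePass> {
  const bool InLTO;

public:
  explicit MyExamplePass(bool InLTO = false) : InLTO(InLTO) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    // Behavior can branch on the LTO phase recorded at construction.
    (void)InLTO;
    return PreservedAnalyses::all();
  }
};
} // namespace llvm
```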
diff --git a/llvm/lib/Analysis/CaptureTracking.cpp b/llvm/lib/Analysis/CaptureTracking.cpp
index a0fe7f9..22229d9 100644
--- a/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/llvm/lib/Analysis/CaptureTracking.cpp
@@ -320,8 +320,12 @@ UseCaptureInfo llvm::DetermineUseCaptureKind(const Use &U, const Value *Base) {
return CaptureComponents::None;
case Instruction::Store:
// Stored the pointer - conservatively assume it may be captured.
+ if (U.getOperandNo() == 0)
+ return MDNode::toCaptureComponents(
+ I->getMetadata(LLVMContext::MD_captures));
+
// Volatile stores make the address observable.
- if (U.getOperandNo() == 0 || cast<StoreInst>(I)->isVolatile())
+ if (cast<StoreInst>(I)->isVolatile())
return CaptureComponents::All;
return CaptureComponents::None;
case Instruction::AtomicRMW: {
diff --git a/llvm/lib/Analysis/CtxProfAnalysis.cpp b/llvm/lib/Analysis/CtxProfAnalysis.cpp
index a363bce..c4abec0 100644
--- a/llvm/lib/Analysis/CtxProfAnalysis.cpp
+++ b/llvm/lib/Analysis/CtxProfAnalysis.cpp
@@ -30,6 +30,9 @@
#define DEBUG_TYPE "ctx_prof"
using namespace llvm;
+
+namespace llvm {
+
cl::opt<std::string>
UseCtxProfile("use-ctx-profile", cl::init(""), cl::Hidden,
cl::desc("Use the specified contextual profile file"));
@@ -50,7 +53,6 @@ static cl::opt<bool> ForceIsInSpecializedModule(
const char *AssignGUIDPass::GUIDMetadataName = "guid";
-namespace llvm {
class ProfileAnnotatorImpl final {
friend class ProfileAnnotator;
class BBInfo;
diff --git a/llvm/lib/Analysis/IR2Vec.cpp b/llvm/lib/Analysis/IR2Vec.cpp
index 99afc06..295b6d3 100644
--- a/llvm/lib/Analysis/IR2Vec.cpp
+++ b/llvm/lib/Analysis/IR2Vec.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Sequence.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Module.h"
@@ -216,6 +217,8 @@ void SymbolicEmbedder::computeEmbeddings(const BasicBlock &BB) const {
ArgEmb += Vocab[*Op];
auto InstVector =
Vocab[I.getOpcode()] + Vocab[I.getType()->getTypeID()] + ArgEmb;
+ if (const auto *IC = dyn_cast<CmpInst>(&I))
+ InstVector += Vocab[IC->getPredicate()];
InstVecMap[&I] = InstVector;
BBVector += InstVector;
}
@@ -250,6 +253,9 @@ void FlowAwareEmbedder::computeEmbeddings(const BasicBlock &BB) const {
// embeddings
auto InstVector =
Vocab[I.getOpcode()] + Vocab[I.getType()->getTypeID()] + ArgEmb;
+ // Add compare predicate embedding as an additional operand if applicable
+ if (const auto *IC = dyn_cast<CmpInst>(&I))
+ InstVector += Vocab[IC->getPredicate()];
InstVecMap[&I] = InstVector;
BBVector += InstVector;
}
@@ -257,42 +263,114 @@ void FlowAwareEmbedder::computeEmbeddings(const BasicBlock &BB) const {
}
// ==----------------------------------------------------------------------===//
-// Vocabulary
+// VocabStorage
//===----------------------------------------------------------------------===//
-unsigned Vocabulary::getDimension() const {
- assert(isValid() && "IR2Vec Vocabulary is invalid");
- return Vocab[0].size();
+VocabStorage::VocabStorage(std::vector<std::vector<Embedding>> &&SectionData)
+ : Sections(std::move(SectionData)), TotalSize([&] {
+ assert(!Sections.empty() && "Vocabulary has no sections");
+ // Compute total size across all sections
+ size_t Size = 0;
+ for (const auto &Section : Sections) {
+ assert(!Section.empty() && "Vocabulary section is empty");
+ Size += Section.size();
+ }
+ return Size;
+ }()),
+ Dimension([&] {
+ // Get dimension from the first embedding in the first section - all
+ // embeddings must have the same dimension
+ assert(!Sections.empty() && "Vocabulary has no sections");
+ assert(!Sections[0].empty() && "First section of vocabulary is empty");
+ unsigned ExpectedDim = static_cast<unsigned>(Sections[0][0].size());
+
+ // Verify that all embeddings across all sections have the same
+ // dimension
+ [[maybe_unused]] auto allSameDim =
+ [ExpectedDim](const std::vector<Embedding> &Section) {
+ return std::all_of(Section.begin(), Section.end(),
+ [ExpectedDim](const Embedding &Emb) {
+ return Emb.size() == ExpectedDim;
+ });
+ };
+ assert(std::all_of(Sections.begin(), Sections.end(), allSameDim) &&
+ "All embeddings must have the same dimension");
+
+ return ExpectedDim;
+ }()) {}
+
+const Embedding &VocabStorage::const_iterator::operator*() const {
+ assert(SectionId < Storage->Sections.size() && "Invalid section ID");
+ assert(LocalIndex < Storage->Sections[SectionId].size() &&
+ "Local index out of range");
+ return Storage->Sections[SectionId][LocalIndex];
+}
+
+VocabStorage::const_iterator &VocabStorage::const_iterator::operator++() {
+ ++LocalIndex;
+ // Check if we need to move to the next section
+ if (SectionId < Storage->getNumSections() &&
+ LocalIndex >= Storage->Sections[SectionId].size()) {
+ assert(LocalIndex == Storage->Sections[SectionId].size() &&
+ "Local index should be at the end of the current section");
+ LocalIndex = 0;
+ ++SectionId;
+ }
+ return *this;
}
-unsigned Vocabulary::getSlotIndex(unsigned Opcode) {
- assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode");
- return Opcode - 1; // Convert to zero-based index
+bool VocabStorage::const_iterator::operator==(
+ const const_iterator &Other) const {
+ return Storage == Other.Storage && SectionId == Other.SectionId &&
+ LocalIndex == Other.LocalIndex;
}
-unsigned Vocabulary::getSlotIndex(Type::TypeID TypeID) {
- assert(static_cast<unsigned>(TypeID) < MaxTypeIDs && "Invalid type ID");
- return MaxOpcodes + static_cast<unsigned>(getCanonicalTypeID(TypeID));
+bool VocabStorage::const_iterator::operator!=(
+ const const_iterator &Other) const {
+ return !(*this == Other);
}
-unsigned Vocabulary::getSlotIndex(const Value &Op) {
- unsigned Index = static_cast<unsigned>(getOperandKind(&Op));
- assert(Index < MaxOperandKinds && "Invalid OperandKind");
- return MaxOpcodes + MaxCanonicalTypeIDs + Index;
-}
+Error VocabStorage::parseVocabSection(StringRef Key,
+ const json::Value &ParsedVocabValue,
+ VocabMap &TargetVocab, unsigned &Dim) {
+ json::Path::Root Path("");
+ const json::Object *RootObj = ParsedVocabValue.getAsObject();
+ if (!RootObj)
+ return createStringError(errc::invalid_argument,
+ "JSON root is not an object");
-const Embedding &Vocabulary::operator[](unsigned Opcode) const {
- return Vocab[getSlotIndex(Opcode)];
-}
+ const json::Value *SectionValue = RootObj->get(Key);
+ if (!SectionValue)
+ return createStringError(errc::invalid_argument,
+ "Missing '" + std::string(Key) +
+ "' section in vocabulary file");
+ if (!json::fromJSON(*SectionValue, TargetVocab, Path))
+ return createStringError(errc::illegal_byte_sequence,
+ "Unable to parse '" + std::string(Key) +
+ "' section from vocabulary");
-const Embedding &Vocabulary::operator[](Type::TypeID TypeID) const {
- return Vocab[getSlotIndex(TypeID)];
-}
+ Dim = TargetVocab.begin()->second.size();
+ if (Dim == 0)
+ return createStringError(errc::illegal_byte_sequence,
+ "Dimension of '" + std::string(Key) +
+ "' section of the vocabulary is zero");
+
+ if (!std::all_of(TargetVocab.begin(), TargetVocab.end(),
+ [Dim](const std::pair<StringRef, Embedding> &Entry) {
+ return Entry.second.size() == Dim;
+ }))
+ return createStringError(
+ errc::illegal_byte_sequence,
+ "All vectors in the '" + std::string(Key) +
+ "' section of the vocabulary are not of the same dimension");
-const ir2vec::Embedding &Vocabulary::operator[](const Value &Arg) const {
- return Vocab[getSlotIndex(Arg)];
+ return Error::success();
}
+//===----------------------------------------------------------------------===//
+// Vocabulary
+//===----------------------------------------------------------------------===//
+
StringRef Vocabulary::getVocabKeyForOpcode(unsigned Opcode) {
assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode");
#define HANDLE_INST(NUM, OPCODE, CLASS) \
@@ -304,29 +382,6 @@ StringRef Vocabulary::getVocabKeyForOpcode(unsigned Opcode) {
return "UnknownOpcode";
}
-StringRef Vocabulary::getVocabKeyForCanonicalTypeID(CanonicalTypeID CType) {
- unsigned Index = static_cast<unsigned>(CType);
- assert(Index < MaxCanonicalTypeIDs && "Invalid CanonicalTypeID");
- return CanonicalTypeNames[Index];
-}
-
-Vocabulary::CanonicalTypeID
-Vocabulary::getCanonicalTypeID(Type::TypeID TypeID) {
- unsigned Index = static_cast<unsigned>(TypeID);
- assert(Index < MaxTypeIDs && "Invalid TypeID");
- return TypeIDMapping[Index];
-}
-
-StringRef Vocabulary::getVocabKeyForTypeID(Type::TypeID TypeID) {
- return getVocabKeyForCanonicalTypeID(getCanonicalTypeID(TypeID));
-}
-
-StringRef Vocabulary::getVocabKeyForOperandKind(Vocabulary::OperandKind Kind) {
- unsigned Index = static_cast<unsigned>(Kind);
- assert(Index < MaxOperandKinds && "Invalid OperandKind");
- return OperandKindNames[Index];
-}
-
// Helper function to classify an operand into OperandKind
Vocabulary::OperandKind Vocabulary::getOperandKind(const Value *Op) {
if (isa<Function>(Op))
@@ -338,18 +393,50 @@ Vocabulary::OperandKind Vocabulary::getOperandKind(const Value *Op) {
return OperandKind::VariableID;
}
+unsigned Vocabulary::getPredicateLocalIndex(CmpInst::Predicate P) {
+ if (P >= CmpInst::FIRST_FCMP_PREDICATE && P <= CmpInst::LAST_FCMP_PREDICATE)
+ return P - CmpInst::FIRST_FCMP_PREDICATE;
+ else
+ return P - CmpInst::FIRST_ICMP_PREDICATE +
+ (CmpInst::LAST_FCMP_PREDICATE - CmpInst::FIRST_FCMP_PREDICATE + 1);
+}
+
+CmpInst::Predicate Vocabulary::getPredicateFromLocalIndex(unsigned LocalIndex) {
+ unsigned fcmpRange =
+ CmpInst::LAST_FCMP_PREDICATE - CmpInst::FIRST_FCMP_PREDICATE + 1;
+ if (LocalIndex < fcmpRange)
+ return static_cast<CmpInst::Predicate>(CmpInst::FIRST_FCMP_PREDICATE +
+ LocalIndex);
+ else
+ return static_cast<CmpInst::Predicate>(CmpInst::FIRST_ICMP_PREDICATE +
+ LocalIndex - fcmpRange);
+}
+
+StringRef Vocabulary::getVocabKeyForPredicate(CmpInst::Predicate Pred) {
+ static SmallString<16> PredNameBuffer;
+ if (Pred < CmpInst::FIRST_ICMP_PREDICATE)
+ PredNameBuffer = "FCMP_";
+ else
+ PredNameBuffer = "ICMP_";
+ PredNameBuffer += CmpInst::getPredicateName(Pred);
+ return PredNameBuffer;
+}
+
StringRef Vocabulary::getStringKey(unsigned Pos) {
assert(Pos < NumCanonicalEntries && "Position out of bounds in vocabulary");
// Opcode
if (Pos < MaxOpcodes)
return getVocabKeyForOpcode(Pos + 1);
// Type
- if (Pos < MaxOpcodes + MaxCanonicalTypeIDs)
+ if (Pos < OperandBaseOffset)
return getVocabKeyForCanonicalTypeID(
static_cast<CanonicalTypeID>(Pos - MaxOpcodes));
// Operand
- return getVocabKeyForOperandKind(
- static_cast<OperandKind>(Pos - MaxOpcodes - MaxCanonicalTypeIDs));
+ if (Pos < PredicateBaseOffset)
+ return getVocabKeyForOperandKind(
+ static_cast<OperandKind>(Pos - OperandBaseOffset));
+ // Predicates
+ return getVocabKeyForPredicate(getPredicate(Pos - PredicateBaseOffset));
}
// For now, assume vocabulary is stable unless explicitly invalidated.
@@ -359,65 +446,62 @@ bool Vocabulary::invalidate(Module &M, const PreservedAnalyses &PA,
return !(PAC.preservedWhenStateless());
}
-Vocabulary::VocabVector Vocabulary::createDummyVocabForTest(unsigned Dim) {
- VocabVector DummyVocab;
- DummyVocab.reserve(NumCanonicalEntries);
+VocabStorage Vocabulary::createDummyVocabForTest(unsigned Dim) {
float DummyVal = 0.1f;
- // Create a dummy vocabulary with entries for all opcodes, types, and
- // operands
- for ([[maybe_unused]] unsigned _ :
- seq(0u, Vocabulary::MaxOpcodes + Vocabulary::MaxCanonicalTypeIDs +
- Vocabulary::MaxOperandKinds)) {
- DummyVocab.push_back(Embedding(Dim, DummyVal));
- DummyVal += 0.1f;
- }
- return DummyVocab;
-}
-// ==----------------------------------------------------------------------===//
-// IR2VecVocabAnalysis
-//===----------------------------------------------------------------------===//
+ // Create sections for opcodes, types, operands, and predicates
+ // Order must match Vocabulary::Section enum
+ std::vector<std::vector<Embedding>> Sections;
+ Sections.reserve(4);
-Error IR2VecVocabAnalysis::parseVocabSection(
- StringRef Key, const json::Value &ParsedVocabValue, VocabMap &TargetVocab,
- unsigned &Dim) {
- json::Path::Root Path("");
- const json::Object *RootObj = ParsedVocabValue.getAsObject();
- if (!RootObj)
- return createStringError(errc::invalid_argument,
- "JSON root is not an object");
+ // Opcodes section
+ std::vector<Embedding> OpcodeSec;
+ OpcodeSec.reserve(MaxOpcodes);
+ for (unsigned I = 0; I < MaxOpcodes; ++I) {
+ OpcodeSec.emplace_back(Dim, DummyVal);
+ DummyVal += 0.1f;
+ }
+ Sections.push_back(std::move(OpcodeSec));
- const json::Value *SectionValue = RootObj->get(Key);
- if (!SectionValue)
- return createStringError(errc::invalid_argument,
- "Missing '" + std::string(Key) +
- "' section in vocabulary file");
- if (!json::fromJSON(*SectionValue, TargetVocab, Path))
- return createStringError(errc::illegal_byte_sequence,
- "Unable to parse '" + std::string(Key) +
- "' section from vocabulary");
+ // Types section
+ std::vector<Embedding> TypeSec;
+ TypeSec.reserve(MaxCanonicalTypeIDs);
+ for (unsigned I = 0; I < MaxCanonicalTypeIDs; ++I) {
+ TypeSec.emplace_back(Dim, DummyVal);
+ DummyVal += 0.1f;
+ }
+ Sections.push_back(std::move(TypeSec));
- Dim = TargetVocab.begin()->second.size();
- if (Dim == 0)
- return createStringError(errc::illegal_byte_sequence,
- "Dimension of '" + std::string(Key) +
- "' section of the vocabulary is zero");
+ // Operands section
+ std::vector<Embedding> OperandSec;
+ OperandSec.reserve(MaxOperandKinds);
+ for (unsigned I = 0; I < MaxOperandKinds; ++I) {
+ OperandSec.emplace_back(Dim, DummyVal);
+ DummyVal += 0.1f;
+ }
+ Sections.push_back(std::move(OperandSec));
- if (!std::all_of(TargetVocab.begin(), TargetVocab.end(),
- [Dim](const std::pair<StringRef, Embedding> &Entry) {
- return Entry.second.size() == Dim;
- }))
- return createStringError(
- errc::illegal_byte_sequence,
- "All vectors in the '" + std::string(Key) +
- "' section of the vocabulary are not of the same dimension");
+ // Predicates section
+ std::vector<Embedding> PredicateSec;
+ PredicateSec.reserve(MaxPredicateKinds);
+ for (unsigned I = 0; I < MaxPredicateKinds; ++I) {
+ PredicateSec.emplace_back(Dim, DummyVal);
+ DummyVal += 0.1f;
+ }
+ Sections.push_back(std::move(PredicateSec));
- return Error::success();
+ return VocabStorage(std::move(Sections));
}
+//===----------------------------------------------------------------------===//
+// IR2VecVocabAnalysis
+//===----------------------------------------------------------------------===//
+
// FIXME: Make this optional. We can avoid file reads
// by auto-generating a default vocabulary during the build time.
-Error IR2VecVocabAnalysis::readVocabulary() {
+Error IR2VecVocabAnalysis::readVocabulary(VocabMap &OpcVocab,
+ VocabMap &TypeVocab,
+ VocabMap &ArgVocab) {
auto BufOrError = MemoryBuffer::getFileOrSTDIN(VocabFile, /*IsText=*/true);
if (!BufOrError)
return createFileError(VocabFile, BufOrError.getError());
@@ -429,16 +513,16 @@ Error IR2VecVocabAnalysis::readVocabulary() {
return ParsedVocabValue.takeError();
unsigned OpcodeDim = 0, TypeDim = 0, ArgDim = 0;
- if (auto Err =
- parseVocabSection("Opcodes", *ParsedVocabValue, OpcVocab, OpcodeDim))
+ if (auto Err = VocabStorage::parseVocabSection("Opcodes", *ParsedVocabValue,
+ OpcVocab, OpcodeDim))
return Err;
- if (auto Err =
- parseVocabSection("Types", *ParsedVocabValue, TypeVocab, TypeDim))
+ if (auto Err = VocabStorage::parseVocabSection("Types", *ParsedVocabValue,
+ TypeVocab, TypeDim))
return Err;
- if (auto Err =
- parseVocabSection("Arguments", *ParsedVocabValue, ArgVocab, ArgDim))
+ if (auto Err = VocabStorage::parseVocabSection("Arguments", *ParsedVocabValue,
+ ArgVocab, ArgDim))
return Err;
if (!(OpcodeDim == TypeDim && TypeDim == ArgDim))
@@ -448,7 +532,9 @@ Error IR2VecVocabAnalysis::readVocabulary() {
return Error::success();
}
-void IR2VecVocabAnalysis::generateNumMappedVocab() {
+void IR2VecVocabAnalysis::generateVocabStorage(VocabMap &OpcVocab,
+ VocabMap &TypeVocab,
+ VocabMap &ArgVocab) {
// Helper for handling missing entities in the vocabulary.
// Currently, we use a zero vector. In the future, we will throw an error to
@@ -466,7 +552,6 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() {
// Handle Opcodes
std::vector<Embedding> NumericOpcodeEmbeddings(Vocabulary::MaxOpcodes,
Embedding(Dim));
- NumericOpcodeEmbeddings.reserve(Vocabulary::MaxOpcodes);
for (unsigned Opcode : seq(0u, Vocabulary::MaxOpcodes)) {
StringRef VocabKey = Vocabulary::getVocabKeyForOpcode(Opcode + 1);
auto It = OpcVocab.find(VocabKey.str());
@@ -475,13 +560,10 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() {
else
handleMissingEntity(VocabKey.str());
}
- Vocab.insert(Vocab.end(), NumericOpcodeEmbeddings.begin(),
- NumericOpcodeEmbeddings.end());
// Handle Types - only canonical types are present in vocabulary
std::vector<Embedding> NumericTypeEmbeddings(Vocabulary::MaxCanonicalTypeIDs,
Embedding(Dim));
- NumericTypeEmbeddings.reserve(Vocabulary::MaxCanonicalTypeIDs);
for (unsigned CTypeID : seq(0u, Vocabulary::MaxCanonicalTypeIDs)) {
StringRef VocabKey = Vocabulary::getVocabKeyForCanonicalTypeID(
static_cast<Vocabulary::CanonicalTypeID>(CTypeID));
@@ -491,13 +573,10 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() {
}
handleMissingEntity(VocabKey.str());
}
- Vocab.insert(Vocab.end(), NumericTypeEmbeddings.begin(),
- NumericTypeEmbeddings.end());
// Handle Arguments/Operands
std::vector<Embedding> NumericArgEmbeddings(Vocabulary::MaxOperandKinds,
Embedding(Dim));
- NumericArgEmbeddings.reserve(Vocabulary::MaxOperandKinds);
for (unsigned OpKind : seq(0u, Vocabulary::MaxOperandKinds)) {
Vocabulary::OperandKind Kind = static_cast<Vocabulary::OperandKind>(OpKind);
StringRef VocabKey = Vocabulary::getVocabKeyForOperandKind(Kind);
@@ -508,15 +587,37 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() {
}
handleMissingEntity(VocabKey.str());
}
- Vocab.insert(Vocab.end(), NumericArgEmbeddings.begin(),
- NumericArgEmbeddings.end());
-}
-IR2VecVocabAnalysis::IR2VecVocabAnalysis(const VocabVector &Vocab)
- : Vocab(Vocab) {}
+ // Handle Predicates: part of Operands section. We look up predicate keys
+ // in ArgVocab.
+ std::vector<Embedding> NumericPredEmbeddings(Vocabulary::MaxPredicateKinds,
+ Embedding(Dim, 0));
+ for (unsigned PK : seq(0u, Vocabulary::MaxPredicateKinds)) {
+ StringRef VocabKey =
+ Vocabulary::getVocabKeyForPredicate(Vocabulary::getPredicate(PK));
+ auto It = ArgVocab.find(VocabKey.str());
+ if (It != ArgVocab.end()) {
+ NumericPredEmbeddings[PK] = It->second;
+ continue;
+ }
+ handleMissingEntity(VocabKey.str());
+ }
-IR2VecVocabAnalysis::IR2VecVocabAnalysis(VocabVector &&Vocab)
- : Vocab(std::move(Vocab)) {}
+ // Create section-based storage instead of flat vocabulary
+ // Order must match Vocabulary::Section enum
+ std::vector<std::vector<Embedding>> Sections(4);
+ Sections[static_cast<unsigned>(Vocabulary::Section::Opcodes)] =
+ std::move(NumericOpcodeEmbeddings); // Section::Opcodes
+ Sections[static_cast<unsigned>(Vocabulary::Section::CanonicalTypes)] =
+ std::move(NumericTypeEmbeddings); // Section::CanonicalTypes
+ Sections[static_cast<unsigned>(Vocabulary::Section::Operands)] =
+ std::move(NumericArgEmbeddings); // Section::Operands
+ Sections[static_cast<unsigned>(Vocabulary::Section::Predicates)] =
+ std::move(NumericPredEmbeddings); // Section::Predicates
+
+ // Create VocabStorage from organized sections
+ Vocab.emplace(std::move(Sections));
+}
void IR2VecVocabAnalysis::emitError(Error Err, LLVMContext &Ctx) {
handleAllErrors(std::move(Err), [&](const ErrorInfoBase &EI) {
@@ -528,8 +629,8 @@ IR2VecVocabAnalysis::Result
IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) {
auto Ctx = &M.getContext();
// If vocabulary is already populated by the constructor, use it.
- if (!Vocab.empty())
- return Vocabulary(std::move(Vocab));
+ if (Vocab.has_value())
+ return Vocabulary(std::move(Vocab.value()));
// Otherwise, try to read from the vocabulary file.
if (VocabFile.empty()) {
@@ -538,7 +639,9 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) {
"set it using --ir2vec-vocab-path");
return Vocabulary(); // Return invalid result
}
- if (auto Err = readVocabulary()) {
+
+ VocabMap OpcVocab, TypeVocab, ArgVocab;
+ if (auto Err = readVocabulary(OpcVocab, TypeVocab, ArgVocab)) {
emitError(std::move(Err), *Ctx);
return Vocabulary();
}
@@ -553,9 +656,9 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) {
scaleVocabSection(ArgVocab, ArgWeight);
// Generate the numeric lookup vocabulary
- generateNumMappedVocab();
+ generateVocabStorage(OpcVocab, TypeVocab, ArgVocab);
- return Vocabulary(std::move(Vocab));
+ return Vocabulary(std::move(Vocab.value()));
}
// ==----------------------------------------------------------------------===//
@@ -564,7 +667,7 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) {
PreservedAnalyses IR2VecPrinterPass::run(Module &M,
ModuleAnalysisManager &MAM) {
- auto Vocabulary = MAM.getResult<IR2VecVocabAnalysis>(M);
+ auto &Vocabulary = MAM.getResult<IR2VecVocabAnalysis>(M);
assert(Vocabulary.isValid() && "IR2Vec Vocabulary is invalid");
for (Function &F : M) {
@@ -606,7 +709,7 @@ PreservedAnalyses IR2VecPrinterPass::run(Module &M,
PreservedAnalyses IR2VecVocabPrinterPass::run(Module &M,
ModuleAnalysisManager &MAM) {
- auto IR2VecVocabulary = MAM.getResult<IR2VecVocabAnalysis>(M);
+ auto &IR2VecVocabulary = MAM.getResult<IR2VecVocabAnalysis>(M);
assert(IR2VecVocabulary.isValid() && "IR2Vec Vocabulary is invalid");
// Print each entry
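
The IR2Vec.cpp rework above replaces the single flat vocabulary vector with a section-based `VocabStorage` (opcodes, canonical types, operands, and now compare predicates), whose `const_iterator` walks each section in order and rolls over at section boundaries. A standalone sketch of that iterator pattern, with simplified types that are not the LLVM API:

```cpp
#include <cstdio>
#include <vector>

// Simplified sketch of a section-spanning const iterator: advance a local
// index, and roll over to the next section when the current one is done.
// Assumes every section is non-empty, as the real code asserts.
struct Storage {
  std::vector<std::vector<float>> Sections;

  struct Iter {
    const Storage *S;
    size_t SectionId = 0, LocalIndex = 0;

    float operator*() const { return S->Sections[SectionId][LocalIndex]; }
    Iter &operator++() {
      if (++LocalIndex >= S->Sections[SectionId].size()) {
        LocalIndex = 0;
        ++SectionId; // move to the next section
      }
      return *this;
    }
    bool operator!=(const Iter &O) const {
      return SectionId != O.SectionId || LocalIndex != O.LocalIndex;
    }
  };

  Iter begin() const { return {this, 0, 0}; }
  Iter end() const { return {this, Sections.size(), 0}; }
};

int main() {
  Storage S{{{0.1f, 0.2f}, {0.3f}, {0.4f, 0.5f}}};
  for (float V : S) // visits all five entries across three sections
    std::printf("%f\n", V);
}
```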
diff --git a/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp b/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp
index 7b93474..25e7a97 100644
--- a/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp
+++ b/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp
@@ -22,6 +22,8 @@ using namespace llvm;
#define DEBUG_TYPE "pgo-icall-prom-analysis"
+namespace llvm {
+
// The percent threshold for the direct-call target (this call site vs the
// remaining call count) for it to be considered as the promotion target.
static cl::opt<unsigned> ICPRemainingPercentThreshold(
@@ -54,6 +56,8 @@ cl::opt<unsigned> MaxNumVTableAnnotations(
"icp-max-num-vtables", cl::init(6), cl::Hidden,
cl::desc("Max number of vtables annotated for a vtable load instruction."));
+} // end namespace llvm
+
bool ICallPromotionAnalysis::isPromotionProfitable(uint64_t Count,
uint64_t TotalCount,
uint64_t RemainingCount) {
diff --git a/llvm/lib/Analysis/InlineAdvisor.cpp b/llvm/lib/Analysis/InlineAdvisor.cpp
index 28b14c2..0fa804f 100644
--- a/llvm/lib/Analysis/InlineAdvisor.cpp
+++ b/llvm/lib/Analysis/InlineAdvisor.cpp
@@ -217,7 +217,7 @@ AnalysisKey PluginInlineAdvisorAnalysis::Key;
bool InlineAdvisorAnalysis::initializeIR2VecVocabIfRequested(
Module &M, ModuleAnalysisManager &MAM) {
if (!IR2VecVocabFile.empty()) {
- auto IR2VecVocabResult = MAM.getResult<IR2VecVocabAnalysis>(M);
+ auto &IR2VecVocabResult = MAM.getResult<IR2VecVocabAnalysis>(M);
if (!IR2VecVocabResult.isValid()) {
M.getContext().emitError("Failed to load IR2Vec vocabulary");
return false;
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 6fb2807..0e5bc48 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1632,19 +1632,25 @@ LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
*getValueFromCondition(Usr->getOperand(0), Condition,
isTrueDest, /*UseBlockValue*/ false);
- if (!OpLatticeVal.isConstantRange())
- return OpLatticeVal;
+ if (OpLatticeVal.isConstantRange()) {
+ const unsigned ResultBitWidth =
+ Usr->getType()->getScalarSizeInBits();
+ if (auto *Trunc = dyn_cast<TruncInst>(Usr))
+ return ValueLatticeElement::getRange(
+ OpLatticeVal.getConstantRange().truncate(
+ ResultBitWidth, Trunc->getNoWrapKind()));
- const unsigned ResultBitWidth =
- Usr->getType()->getScalarSizeInBits();
- if (auto *Trunc = dyn_cast<TruncInst>(Usr))
return ValueLatticeElement::getRange(
- OpLatticeVal.getConstantRange().truncate(
- ResultBitWidth, Trunc->getNoWrapKind()));
-
- return ValueLatticeElement::getRange(
- OpLatticeVal.getConstantRange().castOp(
- cast<CastInst>(Usr)->getOpcode(), ResultBitWidth));
+ OpLatticeVal.getConstantRange().castOp(
+ cast<CastInst>(Usr)->getOpcode(), ResultBitWidth));
+ }
+ if (OpLatticeVal.isConstant()) {
+ Constant *C = OpLatticeVal.getConstant();
+ if (auto *CastC = ConstantFoldCastOperand(
+ cast<CastInst>(Usr)->getOpcode(), C, Usr->getType(), DL))
+ return ValueLatticeElement::get(CastC);
+ }
+ return ValueLatticeElement::getOverdefined();
} else {
// If one of Val's operand has an inferred value, we may be able to
// infer the value of Val.
diff --git a/llvm/lib/Analysis/MemoryProfileInfo.cpp b/llvm/lib/Analysis/MemoryProfileInfo.cpp
index b5ca6b1..0c1f8db 100644
--- a/llvm/lib/Analysis/MemoryProfileInfo.cpp
+++ b/llvm/lib/Analysis/MemoryProfileInfo.cpp
@@ -22,6 +22,8 @@ using namespace llvm::memprof;
#define DEBUG_TYPE "memory-profile-info"
+namespace llvm {
+
cl::opt<bool> MemProfReportHintedSizes(
"memprof-report-hinted-sizes", cl::init(false), cl::Hidden,
cl::desc("Report total allocation sizes of hinted allocations"));
@@ -52,6 +54,8 @@ cl::opt<unsigned> MinPercentMaxColdSize(
"memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden,
cl::desc("Min percent of max cold bytes for critical cold context"));
+} // end namespace llvm
+
bool llvm::memprof::metadataIncludesAllContextSizeInfo() {
return MemProfReportHintedSizes || MinClonedColdBytePercent < 100;
}
@@ -121,24 +125,6 @@ bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) {
return NumAllocTypes == 1;
}
-void llvm::memprof::removeAnyExistingAmbiguousAttribute(CallBase *CB) {
- if (!CB->hasFnAttr("memprof"))
- return;
- assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous");
- CB->removeFnAttr("memprof");
-}
-
-void llvm::memprof::addAmbiguousAttribute(CallBase *CB) {
- // We may have an existing ambiguous attribute if we are reanalyzing
- // after inlining.
- if (CB->hasFnAttr("memprof")) {
- assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous");
- } else {
- auto A = llvm::Attribute::get(CB->getContext(), "memprof", "ambiguous");
- CB->addFnAttr(A);
- }
-}
-
void CallStackTrie::addCallStack(
AllocationType AllocType, ArrayRef<uint64_t> StackIds,
std::vector<ContextTotalSize> ContextSizeInfo) {
@@ -484,9 +470,6 @@ void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
StringRef Descriptor) {
auto AllocTypeString = getAllocTypeAttributeString(AT);
auto A = llvm::Attribute::get(CI->getContext(), "memprof", AllocTypeString);
- // After inlining we may be able to convert an existing ambiguous allocation
- // to an unambiguous one.
- removeAnyExistingAmbiguousAttribute(CI);
CI->addFnAttr(A);
if (MemProfReportHintedSizes) {
std::vector<ContextTotalSize> ContextSizeInfo;
@@ -546,7 +529,6 @@ bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
assert(MIBCallStack.size() == 1 &&
"Should only be left with Alloc's location in stack");
CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
- addAmbiguousAttribute(CI);
return true;
}
// If there exists corner case that CallStackTrie has one chain to leaf
diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index a317ac4..a60a4bb 100644
--- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -67,7 +67,6 @@ using namespace llvm::memprof;
namespace llvm {
FunctionSummary::ForceSummaryHotnessType ForceSummaryEdgesCold =
FunctionSummary::FSHT_None;
-} // namespace llvm
static cl::opt<FunctionSummary::ForceSummaryHotnessType, true> FSEC(
"force-summary-edges-cold", cl::Hidden, cl::location(ForceSummaryEdgesCold),
@@ -91,6 +90,7 @@ LLVM_ABI extern cl::opt<bool> ScalePartialSampleProfileWorkingSetSize;
extern cl::opt<unsigned> MaxNumVTableAnnotations;
extern cl::opt<bool> MemProfReportHintedSizes;
+} // namespace llvm
// Walk through the operands of a given User via worklist iteration and populate
// the set of GlobalValue references encountered. Invoked either on an
diff --git a/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
index f1c3155..44d7a17 100644
--- a/llvm/lib/Analysis/ProfileSummaryInfo.cpp
+++ b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
@@ -24,6 +24,8 @@
#include <optional>
using namespace llvm;
+namespace llvm {
+
static cl::opt<bool> PartialProfile(
"partial-profile", cl::Hidden, cl::init(false),
cl::desc("Specify the current profile is used as a partial profile."));
@@ -44,6 +46,8 @@ static cl::opt<double> PartialSampleProfileWorkingSetSizeScaleFactor(
"and the factor to scale the working set size to use the same "
"shared thresholds as PGO."));
+} // end namespace llvm
+
// The profile summary metadata may be attached either by the frontend or by
// any backend passes (IR level instrumentation, for example). This method
// checks if the Summary is null and if so checks if the summary metadata is now
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index b08399b..63e1b14 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3598,6 +3598,13 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
}
}
+ // TODO: Generalize to handle any common factors.
+ // udiv (mul nuw a, vscale), (mul nuw b, vscale) --> udiv a, b
+ const SCEV *NewLHS, *NewRHS;
+ if (match(LHS, m_scev_c_NUWMul(m_SCEV(NewLHS), m_SCEVVScale())) &&
+ match(RHS, m_scev_c_NUWMul(m_SCEV(NewRHS), m_SCEVVScale())))
+ return getUDivExpr(NewLHS, NewRHS);
+
// The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
// changes). Make sure we get a new one.
IP = nullptr;
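
The new ScalarEvolution fold cancels the common `vscale` factor: `udiv (mul nuw a, vscale), (mul nuw b, vscale)` becomes `udiv a, b`, which is only valid because the `nuw` flags guarantee neither multiply wraps. A small check of the underlying identity, with plain unsigned integers standing in for SCEVs:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // (a * v) / (b * v) == a / b holds when the multiplications do not wrap;
  // the nuw flags on both SCEV muls are what license the fold.
  uint64_t A = 12, B = 5, V = 16; // V plays the role of vscale
  assert((A * V) / (B * V) == A / B);

  // Without the no-wrap guarantee the identity can fail: here A * V wraps.
  uint32_t WrapA = 0x80000000u, WrapB = 2, WrapV = 2;
  assert((WrapA * WrapV) / (WrapB * WrapV) != WrapA / WrapB);
  return 0;
}
```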
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 6f11b25..09a8fbe 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -7651,25 +7651,26 @@ static bool isGuaranteedNotToBeUndefOrPoison(
return true;
}
- if (const auto *PN = dyn_cast<PHINode>(V)) {
- unsigned Num = PN->getNumIncomingValues();
- bool IsWellDefined = true;
- for (unsigned i = 0; i < Num; ++i) {
- if (PN == PN->getIncomingValue(i))
- continue;
- auto *TI = PN->getIncomingBlock(i)->getTerminator();
- if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
- DT, Depth + 1, Kind)) {
- IsWellDefined = false;
- break;
+ if (!::canCreateUndefOrPoison(Opr, Kind,
+ /*ConsiderFlagsAndMetadata=*/true)) {
+ if (const auto *PN = dyn_cast<PHINode>(V)) {
+ unsigned Num = PN->getNumIncomingValues();
+ bool IsWellDefined = true;
+ for (unsigned i = 0; i < Num; ++i) {
+ if (PN == PN->getIncomingValue(i))
+ continue;
+ auto *TI = PN->getIncomingBlock(i)->getTerminator();
+ if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
+ DT, Depth + 1, Kind)) {
+ IsWellDefined = false;
+ break;
+ }
}
- }
- if (IsWellDefined)
+ if (IsWellDefined)
+ return true;
+ } else if (all_of(Opr->operands(), OpCheck))
return true;
- } else if (!::canCreateUndefOrPoison(Opr, Kind,
- /*ConsiderFlagsAndMetadata*/ true) &&
- all_of(Opr->operands(), OpCheck))
- return true;
+ }
}
if (auto *I = dyn_cast<LoadInst>(V))
diff --git a/llvm/lib/CAS/OnDiskTrieRawHashMap.cpp b/llvm/lib/CAS/OnDiskTrieRawHashMap.cpp
index 9b382dd7..9403893 100644
--- a/llvm/lib/CAS/OnDiskTrieRawHashMap.cpp
+++ b/llvm/lib/CAS/OnDiskTrieRawHashMap.cpp
@@ -114,7 +114,7 @@ public:
using SlotT = std::atomic<int64_t>;
static int64_t getSlotsSize(uint32_t NumBits) {
- return sizeof(int64_t) * (1u << NumBits);
+ return sizeof(int64_t) * (1ull << NumBits);
}
static int64_t getSize(uint32_t NumBits) {
@@ -191,7 +191,8 @@ private:
MutableArrayRef<SlotT> Slots;
static MutableArrayRef<SlotT> getSlots(Header &H) {
- return MutableArrayRef(reinterpret_cast<SlotT *>(&H + 1), 1u << H.NumBits);
+ return MutableArrayRef(reinterpret_cast<SlotT *>(&H + 1),
+ 1ull << H.NumBits);
}
};
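
The OnDiskTrieRawHashMap fix above matters because `1u << NumBits` performs a 32-bit shift: for `NumBits >= 32` it is undefined behavior and in practice yields a truncated slot count, while `1ull` performs the shift in 64 bits. A minimal demonstration of the difference:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t NumBits = 33;
  // 32-bit shift: UB for NumBits >= 32 (commonly wraps to 1u << 1 == 2).
  // uint64_t Bad = 1u << NumBits;     // the bug the patch fixes
  uint64_t Good = 1ull << NumBits;     // shift performed in 64 bits
  std::printf("%llu\n", (unsigned long long)Good); // 8589934592 == 2^33
  return 0;
}
```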
diff --git a/llvm/lib/CGData/CodeGenData.cpp b/llvm/lib/CGData/CodeGenData.cpp
index b4f08c3..7900dc7 100644
--- a/llvm/lib/CGData/CodeGenData.cpp
+++ b/llvm/lib/CGData/CodeGenData.cpp
@@ -31,11 +31,14 @@ static cl::opt<bool>
static cl::opt<std::string>
CodeGenDataUsePath("codegen-data-use-path", cl::init(""), cl::Hidden,
cl::desc("File path to where .cgdata file is read"));
+
+namespace llvm {
cl::opt<bool> CodeGenDataThinLTOTwoRounds(
"codegen-data-thinlto-two-rounds", cl::init(false), cl::Hidden,
cl::desc("Enable two-round ThinLTO code generation. The first round "
"emits codegen data, while the second round uses the emitted "
"codegen data for further optimizations."));
+} // end namespace llvm
static std::string getCGDataErrString(cgdata_error Err,
const std::string &ErrMsg = "") {
diff --git a/llvm/lib/CGData/CodeGenDataReader.cpp b/llvm/lib/CGData/CodeGenDataReader.cpp
index 3fd8cfe..b1cd939 100644
--- a/llvm/lib/CGData/CodeGenDataReader.cpp
+++ b/llvm/lib/CGData/CodeGenDataReader.cpp
@@ -26,14 +26,14 @@ static cl::opt<bool> IndexedCodeGenDataReadFunctionMapNames(
"disabled to save memory and time for final consumption of the "
"indexed CodeGenData in production."));
+namespace llvm {
+
cl::opt<bool> IndexedCodeGenDataLazyLoading(
"indexed-codegen-data-lazy-loading", cl::init(false), cl::Hidden,
cl::desc(
"Lazily load indexed CodeGenData. Enable to save memory and time "
"for final consumption of the indexed CodeGenData in production."));
-namespace llvm {
-
static Expected<std::unique_ptr<MemoryBuffer>>
setupMemoryBuffer(const Twine &Filename, vfs::FileSystem &FS) {
auto BufferOrErr = Filename.str() == "-" ? MemoryBuffer::getSTDIN()
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 701a6a2..11efe49 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -473,11 +473,9 @@ bool AsmPrinter::doInitialization(Module &M) {
AddrLabelSymbols = nullptr;
// Initialize TargetLoweringObjectFile.
- const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
- .Initialize(OutContext, TM);
+ TM.getObjFileLowering()->Initialize(OutContext, TM);
- const_cast<TargetLoweringObjectFile &>(getObjFileLowering())
- .getModuleMetadata(M);
+ TM.getObjFileLowering()->getModuleMetadata(M);
// On AIX, we delay emitting any section information until
// after emitting the .file pseudo-op. This allows additional
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 62fb5eb..3cfe7cc 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -1889,11 +1889,12 @@ DIE &DwarfUnit::constructMemberDIE(DIE &Buffer, const DIDerivedType *DT) {
bool IsBitfield = DT->isBitField();
// Handle the size.
- if (auto *Var = dyn_cast_or_null<DIVariable>(DT->getRawSizeInBits())) {
+ if (DT->getRawSizeInBits() == nullptr) {
+ // No size, just ignore.
+ } else if (auto *Var = dyn_cast<DIVariable>(DT->getRawSizeInBits())) {
if (auto *VarDIE = getDIE(Var))
addDIEEntry(MemberDie, dwarf::DW_AT_bit_size, *VarDIE);
- } else if (auto *Exp =
- dyn_cast_or_null<DIExpression>(DT->getRawSizeInBits())) {
+ } else if (auto *Exp = dyn_cast<DIExpression>(DT->getRawSizeInBits())) {
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
DIEDwarfExpression DwarfExpr(*Asm, getCU(), *Loc);
DwarfExpr.setMemoryLocationKind();
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 56e13f0..884c3f1 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2362,6 +2362,13 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
MachineInstr::copyFlagsFromInstruction(CI));
return true;
}
+ case Intrinsic::modf: {
+ ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
+ MIRBuilder.buildModf(VRegs[0], VRegs[1],
+ getOrCreateVReg(*CI.getArgOperand(0)),
+ MachineInstr::copyFlagsFromInstruction(CI));
+ return true;
+ }
case Intrinsic::sincos: {
ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
MIRBuilder.buildFSincos(VRegs[0], VRegs[1],
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 03dfa6f..cffaf7c 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -471,6 +471,8 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
RTLIBCASE(TANH_F);
case TargetOpcode::G_FSINCOS:
RTLIBCASE(SINCOS_F);
+ case TargetOpcode::G_FMODF:
+ RTLIBCASE(MODF_F);
case TargetOpcode::G_FLOG10:
RTLIBCASE(LOG10_F);
case TargetOpcode::G_FLOG:
@@ -703,6 +705,46 @@ LegalizerHelper::LegalizeResult LegalizerHelper::emitSincosLibcall(
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::emitModfLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
+ unsigned Size, Type *OpType,
+ LostDebugLocObserver &LocObserver) {
+ MachineFunction &MF = MIRBuilder.getMF();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ Register DstFrac = MI.getOperand(0).getReg();
+ Register DstInt = MI.getOperand(1).getReg();
+ Register Src = MI.getOperand(2).getReg();
+ LLT DstTy = MRI.getType(DstFrac);
+
+ int MemSize = DstTy.getSizeInBytes();
+ Align Alignment = getStackTemporaryAlignment(DstTy);
+ const DataLayout &DL = MIRBuilder.getDataLayout();
+ unsigned AddrSpace = DL.getAllocaAddrSpace();
+ MachinePointerInfo PtrInfo;
+
+ Register StackPtrInt =
+ createStackTemporary(TypeSize::getFixed(MemSize), Alignment, PtrInfo)
+ .getReg(0);
+
+ auto &Ctx = MF.getFunction().getContext();
+ auto LibcallResult = createLibcall(
+ MIRBuilder, getRTLibDesc(MI.getOpcode(), Size), {DstFrac, OpType, 0},
+ {{Src, OpType, 0}, {StackPtrInt, PointerType::get(Ctx, AddrSpace), 1}},
+ LocObserver, &MI);
+
+ if (LibcallResult != LegalizeResult::Legalized)
+ return LegalizerHelper::UnableToLegalize;
+
+ MachineMemOperand *LoadMMOInt = MF.getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOLoad, MemSize, Alignment);
+
+ MIRBuilder.buildLoad(DstInt, StackPtrInt, *LoadMMOInt);
+ MI.eraseFromParent();
+
+ return LegalizerHelper::Legalized;
+}
+
+LegalizerHelper::LegalizeResult
llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
MachineInstr &MI, LostDebugLocObserver &LocObserver) {
auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
@@ -1341,6 +1383,16 @@ LegalizerHelper::libcall(MachineInstr &MI, LostDebugLocObserver &LocObserver) {
}
return emitSincosLibcall(MI, MIRBuilder, Size, HLTy, LocObserver);
}
+ case TargetOpcode::G_FMODF: {
+ LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
+ unsigned Size = LLTy.getSizeInBits();
+ Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
+ if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) {
+ LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n");
+ return UnableToLegalize;
+ }
+ return emitModfLibcall(MI, MIRBuilder, Size, HLTy, LocObserver);
+ }
case TargetOpcode::G_LROUND:
case TargetOpcode::G_LLROUND:
case TargetOpcode::G_INTRINSIC_LRINT:
@@ -3333,6 +3385,16 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
Observer.changedInstr(MI);
return Legalized;
+ case TargetOpcode::G_FMODF: {
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
+
+ widenScalarDst(MI, WideTy, 1, TargetOpcode::G_FPTRUNC);
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), --MIRBuilder.getInsertPt());
+ widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
case TargetOpcode::G_FPOWI:
case TargetOpcode::G_FLDEXP:
case TargetOpcode::G_STRICT_FLDEXP: {
@@ -5472,6 +5534,7 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
case G_LROUND:
case G_LLROUND:
case G_INTRINSIC_TRUNC:
+ case G_FMODF:
case G_FCOS:
case G_FSIN:
case G_FTAN:
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index 477e5c1..c2d474f 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -34,7 +34,7 @@ cl::opt<bool> llvm::DisableGISelLegalityCheck(
cl::desc("Don't verify that MIR is fully legal between GlobalISel passes"),
cl::Hidden);
-cl::opt<bool> VerboseVerifyLegalizerInfo(
+static cl::opt<bool> VerboseVerifyLegalizerInfo(
"verbose-gisel-verify-legalizer-info",
cl::desc("Print more information to dbgs about GlobalISel legalizer rules "
"being verified"),
diff --git a/llvm/lib/CodeGen/MachineRegionInfo.cpp b/llvm/lib/CodeGen/MachineRegionInfo.cpp
index f8268b8..366755a 100644
--- a/llvm/lib/CodeGen/MachineRegionInfo.cpp
+++ b/llvm/lib/CodeGen/MachineRegionInfo.cpp
@@ -10,6 +10,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/RegionInfoImpl.h"
#include "llvm/CodeGen/MachinePostDominators.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
@@ -127,7 +128,7 @@ LLVM_DUMP_METHOD void MachineRegionInfoPass::dump() const {
#endif
char MachineRegionInfoPass::ID = 0;
-char &MachineRegionInfoPassID = MachineRegionInfoPass::ID;
+char &llvm::MachineRegionInfoPassID = MachineRegionInfoPass::ID;
INITIALIZE_PASS_BEGIN(MachineRegionInfoPass, DEBUG_TYPE,
"Detect single entry single exit regions", true, true)
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 729a57e..e1d39d6 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -1929,7 +1929,27 @@ ValueTrackerResult ValueTracker::getNextSourceFromCopy() {
const MachineOperand &Src = Def->getOperand(1);
if (Src.isUndef())
return ValueTrackerResult();
- return ValueTrackerResult(Src.getReg(), Src.getSubReg());
+
+ Register SrcReg = Src.getReg();
+ unsigned SubReg = Src.getSubReg();
+ if (DefSubReg) {
+ const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
+ SubReg = TRI->composeSubRegIndices(SubReg, DefSubReg);
+
+ if (SrcReg.isVirtual()) {
+ // TODO: Try constraining on rewrite if we can
+ const TargetRegisterClass *RegRC = MRI.getRegClass(SrcReg);
+ const TargetRegisterClass *SrcWithSubRC =
+ TRI->getSubClassWithSubReg(RegRC, SubReg);
+ if (RegRC != SrcWithSubRC)
+ return ValueTrackerResult();
+ } else {
+ if (!TRI->getSubReg(SrcReg, SubReg))
+ return ValueTrackerResult();
+ }
+ }
+
+ return ValueTrackerResult(SrcReg, SubReg);
}
ValueTrackerResult ValueTracker::getNextSourceFromBitcast() {
diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp
index 8e6cf3e..7fe13a3 100644
--- a/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -1406,8 +1406,24 @@ bool RAGreedy::trySplitAroundHintReg(MCPhysReg Hint,
continue;
// Check if VirtReg interferes with OtherReg after this COPY instruction.
- if (!IsDef && VirtReg.liveAt(LIS->getInstructionIndex(Instr).getRegSlot()))
- continue;
+ if (Opnd.readsReg()) {
+ SlotIndex Index = LIS->getInstructionIndex(Instr).getRegSlot();
+
+ if (SubReg) {
+ LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
+ if (IsDef)
+ Mask = ~Mask;
+
+ if (any_of(VirtReg.subranges(), [=](const LiveInterval::SubRange &S) {
+ return (S.LaneMask & Mask).any() && S.liveAt(Index);
+ })) {
+ continue;
+ }
+ } else {
+ if (VirtReg.liveAt(Index))
+ continue;
+ }
+ }
MCRegister OtherPhysReg =
OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg);
@@ -2419,25 +2435,28 @@ void RAGreedy::collectHintInfo(Register Reg, HintsInfo &Out) {
unsigned SubReg = Opnd.getSubReg();
// Get the current assignment.
- MCRegister OtherPhysReg =
- OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg);
- if (OtherSubReg) {
- if (OtherReg.isPhysical()) {
- MCRegister Tuple =
- TRI->getMatchingSuperReg(OtherPhysReg, OtherSubReg, RC);
- if (!Tuple)
- continue;
- OtherPhysReg = Tuple;
- } else {
- // TODO: There should be a hinting mechanism for subregisters
- if (SubReg != OtherSubReg)
- continue;
- }
+ MCRegister OtherPhysReg;
+ if (OtherReg.isPhysical()) {
+ if (OtherSubReg)
+ OtherPhysReg = TRI->getMatchingSuperReg(OtherReg, OtherSubReg, RC);
+ else if (SubReg)
+ OtherPhysReg = TRI->getMatchingSuperReg(OtherReg, SubReg, RC);
+ else
+ OtherPhysReg = OtherReg;
+ } else {
+ OtherPhysReg = VRM->getPhys(OtherReg);
+ // TODO: Should find matching superregister, but applying this in the
+ // non-hint case currently causes regressions
+
+ if (SubReg && OtherSubReg && SubReg != OtherSubReg)
+ continue;
}
// Push the collected information.
- Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
- OtherPhysReg));
+ if (OtherPhysReg) {
+ Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
+ OtherPhysReg));
+ }
}
}
@@ -2466,15 +2485,13 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) {
// We have a broken hint, check if it is possible to fix it by
// reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
// some register and PhysReg may be available for the other live-ranges.
- SmallSet<Register, 4> Visited;
- SmallVector<Register, 2> RecoloringCandidates;
HintsInfo Info;
Register Reg = VirtReg.reg();
MCRegister PhysReg = VRM->getPhys(Reg);
// Start the recoloring algorithm from the input live-interval, then
// it will propagate to the ones that are copy-related with it.
- Visited.insert(Reg);
- RecoloringCandidates.push_back(Reg);
+ SmallSet<Register, 4> Visited = {Reg};
+ SmallVector<Register, 2> RecoloringCandidates = {Reg};
LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
<< '(' << printReg(PhysReg, TRI) << ")\n");
@@ -2482,12 +2499,10 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) {
do {
Reg = RecoloringCandidates.pop_back_val();
- // We cannot recolor physical register.
- if (Reg.isPhysical())
- continue;
+ MCRegister CurrPhys = VRM->getPhys(Reg);
// This may be a skipped register.
- if (!VRM->hasPhys(Reg)) {
+ if (!CurrPhys) {
assert(!shouldAllocateRegister(Reg) &&
"We have an unallocated variable which should have been handled");
continue;
@@ -2496,7 +2511,6 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) {
// Get the live interval mapped with this virtual register to be able
// to check for the interference with the new color.
LiveInterval &LI = LIS->getInterval(Reg);
- MCRegister CurrPhys = VRM->getPhys(Reg);
// Check that the new color matches the register class constraints and
// that it is free for this live range.
if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
@@ -2533,7 +2547,8 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) {
// Push all copy-related live-ranges to keep reconciling the broken
// hints.
for (const HintInfo &HI : Info) {
- if (Visited.insert(HI.Reg).second)
+ // We cannot recolor physical register.
+ if (HI.Reg.isVirtual() && Visited.insert(HI.Reg).second)
RecoloringCandidates.push_back(HI.Reg);
}
} while (!RecoloringCandidates.empty());
diff --git a/llvm/lib/CodeGen/RegAllocScore.cpp b/llvm/lib/CodeGen/RegAllocScore.cpp
index 9c9cc1f..280946b 100644
--- a/llvm/lib/CodeGen/RegAllocScore.cpp
+++ b/llvm/lib/CodeGen/RegAllocScore.cpp
@@ -23,6 +23,8 @@
#include "llvm/Support/CommandLine.h"
using namespace llvm;
+
+namespace llvm {
LLVM_ABI cl::opt<double> CopyWeight("regalloc-copy-weight", cl::init(0.2),
cl::Hidden);
LLVM_ABI cl::opt<double> LoadWeight("regalloc-load-weight", cl::init(4.0),
@@ -33,6 +35,8 @@ LLVM_ABI cl::opt<double> CheapRematWeight("regalloc-cheap-remat-weight",
cl::init(0.2), cl::Hidden);
LLVM_ABI cl::opt<double> ExpensiveRematWeight("regalloc-expensive-remat-weight",
cl::init(1.0), cl::Hidden);
+} // end namespace llvm
+
#define DEBUG_TYPE "regalloc-score"
RegAllocScore &RegAllocScore::operator+=(const RegAllocScore &Other) {
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 7ac1aef..ebfea8e 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -584,14 +584,14 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
return DstReg == Dst;
// This is a partial register copy. Check that the parts match.
return Register(TRI.getSubReg(DstReg, SrcSub)) == Dst;
- } else {
- // DstReg is virtual.
- if (DstReg != Dst)
- return false;
- // Registers match, do the subregisters line up?
- return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
- TRI.composeSubRegIndices(DstIdx, DstSub);
}
+
+ // DstReg is virtual.
+ if (DstReg != Dst)
+ return false;
+ // Registers match, do the subregisters line up?
+ return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
+ TRI.composeSubRegIndices(DstIdx, DstSub);
}
void RegisterCoalescerLegacy::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -2914,8 +2914,7 @@ JoinVals::ConflictResolution JoinVals::analyzeValue(unsigned ValNo,
if ((V.ValidLanes & OtherV.ValidLanes).any())
// Overlapping lanes can't be resolved.
return CR_Impossible;
- else
- return CR_Merge;
+ return CR_Merge;
}
// No simultaneous def. Is Other live at the def?
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 204e1f0..558c5a0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12994,13 +12994,31 @@ SDValue DAGCombiner::foldPartialReduceMLAMulOp(SDNode *N) {
SDValue Op1 = N->getOperand(1);
SDValue Op2 = N->getOperand(2);
- APInt C;
- if (Op1->getOpcode() != ISD::MUL ||
- !ISD::isConstantSplatVector(Op2.getNode(), C) || !C.isOne())
+ unsigned Opc = Op1->getOpcode();
+ if (Opc != ISD::MUL && Opc != ISD::SHL)
return SDValue();
SDValue LHS = Op1->getOperand(0);
SDValue RHS = Op1->getOperand(1);
+
+ // Try to treat (shl %a, %c) as (mul %a, (1 << %c)) for constant %c.
+ if (Opc == ISD::SHL) {
+ APInt C;
+ if (!ISD::isConstantSplatVector(RHS.getNode(), C))
+ return SDValue();
+
+ RHS =
+ DAG.getSplatVector(RHS.getValueType(), DL,
+ DAG.getConstant(APInt(C.getBitWidth(), 1).shl(C), DL,
+ RHS.getValueType().getScalarType()));
+ Opc = ISD::MUL;
+ }
+
+ APInt C;
+ if (Opc != ISD::MUL || !ISD::isConstantSplatVector(Op2.getNode(), C) ||
+ !C.isOne())
+ return SDValue();
+
unsigned LHSOpcode = LHS->getOpcode();
if (!ISD::isExtOpcode(LHSOpcode))
return SDValue();
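The shl-to-mul rewrite above leans on the usual strength-reduction identity in reverse: a left shift by a constant C multiplies by 2^C. A standalone sketch with plain APInt arithmetic (illustration only, not part of the combine):

#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  llvm::APInt A(32, 7), C(32, 3);
  llvm::APInt ByShift = A.shl(C);                    // shl %a, 3
  llvm::APInt ByMul = A * llvm::APInt(32, 1).shl(C); // mul %a, (1 << 3)
  assert(ByShift == ByMul && "shl by C == mul by 2^C");
  return 0;
}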
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index ff7cd66..87d5453 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -6256,17 +6256,17 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
// FIXME: Not all targets may support EVL in VP_LOAD. These will have been
// removed from the IR by the ExpandVectorPredication pass but we're
// reintroducing them here.
- EVT LdVT = LD->getMemoryVT();
- EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), LdVT);
- EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
- WideVT.getVectorElementCount());
+ EVT VT = LD->getValueType(0);
+ EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+ EVT WideMaskVT = getSetCCResultType(WideVT);
+
if (ExtType == ISD::NON_EXTLOAD &&
TLI.isOperationLegalOrCustom(ISD::VP_LOAD, WideVT) &&
TLI.isTypeLegal(WideMaskVT)) {
SDLoc DL(N);
SDValue Mask = DAG.getAllOnesConstant(DL, WideMaskVT);
SDValue EVL = DAG.getElementCount(DL, TLI.getVPExplicitVectorLengthTy(),
- LdVT.getVectorElementCount());
+ VT.getVectorElementCount());
SDValue NewLoad =
DAG.getLoadVP(LD->getAddressingMode(), ISD::NON_EXTLOAD, WideVT, DL,
LD->getChain(), LD->getBasePtr(), LD->getOffset(), Mask,
@@ -6303,6 +6303,24 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
return Result;
}
+ if (VT.isVector()) {
+ // If all else fails, replace the load with a wide masked load.
+ SDLoc DL(N);
+ EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
+
+ SDValue Len = DAG.getElementCount(DL, IdxVT, VT.getVectorElementCount());
+ SDValue Mask = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, WideMaskVT,
+ DAG.getConstant(0, DL, IdxVT), Len);
+
+ SDValue NewLoad = DAG.getMaskedLoad(
+ WideVT, DL, LD->getChain(), LD->getBasePtr(), LD->getOffset(), Mask,
+ DAG.getPOISON(WideVT), LD->getMemoryVT(), LD->getMemOperand(),
+ LD->getAddressingMode(), LD->getExtensionType());
+
+ ReplaceValueWith(SDValue(N, 1), NewLoad.getValue(1));
+ return NewLoad;
+ }
+
report_fatal_error("Unable to widen vector load");
}
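For reference, the fallback added above keeps the widened access safe by masking off the extra lanes: GET_ACTIVE_LANE_MASK enables lane i exactly when i is below the original element count. A scalar model of that mask (illustration only):

#include <cstddef>
#include <vector>

// Lane i of the widened access is enabled iff Base + i < TripCount, so a
// widened masked load/store never touches memory past the original access;
// disabled load lanes come from the passthru operand (poison here).
std::vector<bool> activeLaneMask(size_t Base, size_t TripCount,
                                 size_t WideNumElts) {
  std::vector<bool> Mask(WideNumElts);
  for (size_t I = 0; I < WideNumElts; ++I)
    Mask[I] = Base + I < TripCount;
  return Mask;
}
// Widening a 3-element load to 4 lanes: activeLaneMask(0, 3, 4) == {1, 1, 1, 0}.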
@@ -7516,8 +7534,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
SDValue StVal = ST->getValue();
EVT StVT = StVal.getValueType();
EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StVT);
- EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
- WideVT.getVectorElementCount());
+ EVT WideMaskVT = getSetCCResultType(WideVT);
if (TLI.isOperationLegalOrCustom(ISD::VP_STORE, WideVT) &&
TLI.isTypeLegal(WideMaskVT)) {
@@ -7540,6 +7557,22 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
return DAG.getNode(ISD::TokenFactor, SDLoc(ST), MVT::Other, StChain);
}
+ if (StVT.isVector()) {
+ // If all else fails, replace the store with a wide masked store.
+ SDLoc DL(N);
+ EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
+
+ SDValue WideStVal = GetWidenedVector(StVal);
+ SDValue Len = DAG.getElementCount(DL, IdxVT, StVT.getVectorElementCount());
+ SDValue Mask = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, WideMaskVT,
+ DAG.getConstant(0, DL, IdxVT), Len);
+
+ return DAG.getMaskedStore(ST->getChain(), DL, WideStVal, ST->getBasePtr(),
+ ST->getOffset(), Mask, ST->getMemoryVT(),
+ ST->getMemOperand(), ST->getAddressingMode(),
+ ST->isTruncatingStore());
+ }
+
report_fatal_error("Unable to widen vector store");
}
@@ -8298,8 +8331,7 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
AAMDNodes AAInfo = LD->getAAInfo();
if (LdVT.isScalableVector())
- report_fatal_error("Generating widen scalable extending vector loads is "
- "not yet supported");
+ return SDValue();
EVT EltVT = WidenVT.getVectorElementType();
EVT LdEltVT = LdVT.getVectorElementType();
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 8fc7eab..95f53fe 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4762,6 +4762,11 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
case ISD::AssertZext:
Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
return VTBits-Tmp;
+ case ISD::FREEZE:
+ if (isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedElts,
+ /*PoisonOnly=*/false))
+ return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ break;
case ISD::MERGE_VALUES:
return ComputeNumSignBits(Op.getOperand(Op.getResNo()), DemandedElts,
Depth + 1);
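The new FREEZE case is sound because freeze is the identity on any operand that is already neither undef nor poison; only then may the operand's sign-bit bound be reused. A self-contained toy model (illustration only; the real logic is the recursion above):

#include <cassert>
#include <cstdint>
#include <optional>

// Count how many copies of the sign bit a 32-bit value carries.
static unsigned numSignBits(int32_t V) {
  unsigned N = 1;
  while (N < 32 && ((V >> 31) & 1) == ((V >> (31 - N)) & 1))
    ++N;
  return N;
}

// Look through "freeze" only when the operand is known well-defined
// (modeled as an engaged optional); otherwise answer the conservative 1.
static unsigned signBitsOfFreeze(std::optional<int32_t> Op) {
  if (Op)
    return numSignBits(*Op); // freeze(x) == x for well-defined x
  return 1; // freeze of possible poison may take any value at all
}

int main() {
  assert(signBitsOfFreeze(-3) == 30);          // look-through
  assert(signBitsOfFreeze(std::nullopt) == 1); // conservative fallback
  return 0;
}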
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index b5201a3..c21890a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8103,10 +8103,6 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::vector_partial_reduce_add: {
- if (!TLI.shouldExpandPartialReductionIntrinsic(cast<IntrinsicInst>(&I))) {
- visitTargetIntrinsic(I, Intrinsic);
- return;
- }
SDValue Acc = getValue(I.getOperand(0));
SDValue Input = getValue(I.getOperand(1));
setValue(&I,
diff --git a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
index 096a33c..64e5cd5 100644
--- a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
+++ b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
@@ -72,7 +72,7 @@ struct StackFrameLayoutAnalysis {
: Slot(Idx), Size(MFI.getObjectSize(Idx)),
Align(MFI.getObjectAlign(Idx).value()), Offset(Offset),
SlotTy(Invalid), Scalable(false) {
- Scalable = MFI.getStackID(Idx) == TargetStackID::ScalableVector;
+ Scalable = MFI.hasScalableStackID(Idx);
if (MFI.isSpillSlotObjectIndex(Idx))
SlotTy = SlotType::Spill;
else if (MFI.isFixedObjectIndex(Idx))
diff --git a/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp b/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp
index c1017d8..d973a47 100644
--- a/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp
+++ b/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp
@@ -148,7 +148,7 @@ std::error_code LVSplitContext::open(std::string ContextName,
return std::error_code();
}
-LVReader *CurrentReader = nullptr;
+static LVReader *CurrentReader = nullptr;
LVReader &LVReader::getInstance() {
if (CurrentReader)
return *CurrentReader;
diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
index 23b72da..6e316f1 100644
--- a/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
@@ -280,6 +280,9 @@ std::vector<Block *> LinkGraph::splitBlockImpl(std::vector<Block *> Blocks,
void LinkGraph::dump(raw_ostream &OS) {
DenseMap<Block *, std::vector<Symbol *>> BlockSymbols;
+ OS << "LinkGraph \"" << getName()
+ << "\" (triple = " << getTargetTriple().str() << ")\n";
+
// Map from blocks to the symbols pointing at them.
for (auto *Sym : defined_symbols())
BlockSymbols[&Sym->getBlock()].push_back(Sym);
diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
index 584b9f0..17050b0 100644
--- a/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
@@ -21,23 +21,21 @@ JITLinkerBase::~JITLinkerBase() = default;
void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
- LLVM_DEBUG({
- dbgs() << "Starting link phase 1 for graph " << G->getName() << "\n";
- });
+ LLVM_DEBUG(dbgs() << "Starting link phase 1\n");
// Prune and optimize the graph.
if (auto Err = runPasses(Passes.PrePrunePasses))
return Ctx->notifyFailed(std::move(Err));
LLVM_DEBUG({
- dbgs() << "Link graph \"" << G->getName() << "\" pre-pruning:\n";
+ dbgs() << "Link graph pre-pruning:\n";
G->dump(dbgs());
});
prune(*G);
LLVM_DEBUG({
- dbgs() << "Link graph \"" << G->getName() << "\" post-pruning:\n";
+ dbgs() << "Link graph post-pruning:\n";
G->dump(dbgs());
});
@@ -67,14 +65,15 @@ void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
AllocResult AR) {
+ LLVM_DEBUG(dbgs() << "Starting link phase 2\n");
+
if (AR)
Alloc = std::move(*AR);
else
return Ctx->notifyFailed(AR.takeError());
LLVM_DEBUG({
- dbgs() << "Link graph \"" << G->getName()
- << "\" before post-allocation passes:\n";
+ dbgs() << "Link graph before post-allocation passes:\n";
G->dump(dbgs());
});
@@ -131,9 +130,7 @@ void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self,
Expected<AsyncLookupResult> LR) {
- LLVM_DEBUG({
- dbgs() << "Starting link phase 3 for graph " << G->getName() << "\n";
- });
+ LLVM_DEBUG(dbgs() << "Starting link phase 3\n");
// If the lookup failed, bail out.
if (!LR)
@@ -143,8 +140,7 @@ void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self,
applyLookupResult(*LR);
LLVM_DEBUG({
- dbgs() << "Link graph \"" << G->getName()
- << "\" before pre-fixup passes:\n";
+ dbgs() << "Link graph before pre-fixup passes:\n";
G->dump(dbgs());
});
@@ -152,7 +148,7 @@ void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self,
return abandonAllocAndBailOut(std::move(Self), std::move(Err));
LLVM_DEBUG({
- dbgs() << "Link graph \"" << G->getName() << "\" before copy-and-fixup:\n";
+ dbgs() << "Link graph before copy-and-fixup:\n";
G->dump(dbgs());
});
@@ -161,7 +157,7 @@ void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self,
return abandonAllocAndBailOut(std::move(Self), std::move(Err));
LLVM_DEBUG({
- dbgs() << "Link graph \"" << G->getName() << "\" after copy-and-fixup:\n";
+ dbgs() << "Link graph after copy-and-fixup:\n";
G->dump(dbgs());
});
@@ -186,16 +182,14 @@ void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self,
void JITLinkerBase::linkPhase4(std::unique_ptr<JITLinkerBase> Self,
FinalizeResult FR) {
- LLVM_DEBUG({
- dbgs() << "Starting link phase 4 for graph " << G->getName() << "\n";
- });
+ LLVM_DEBUG(dbgs() << "Starting link phase 4\n");
if (!FR)
return Ctx->notifyFailed(FR.takeError());
Ctx->notifyFinalized(std::move(*FR));
- LLVM_DEBUG({ dbgs() << "Link of graph " << G->getName() << " complete\n"; });
+ LLVM_DEBUG(dbgs() << "Link complete\n");
}
Error JITLinkerBase::runPasses(LinkGraphPassList &Passes) {
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
index 09ac0f1..f794780 100644
--- a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
@@ -599,8 +599,7 @@ Expected<std::unique_ptr<LinkGraph>> createLinkGraphFromMachOObject_arm64(
}
static Error applyPACSigningToModInitPointers(LinkGraph &G) {
- assert(G.getTargetTriple().getSubArch() == Triple::AArch64SubArch_arm64e &&
- "PAC signing only valid for arm64e");
+ assert(G.getTargetTriple().isArm64e() && "PAC signing only valid for arm64e");
if (auto *ModInitSec = G.findSectionByName("__DATA,__mod_init_func")) {
for (auto *B : ModInitSec->blocks()) {
diff --git a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
index 92c62b8..2b33e56 100644
--- a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
+++ b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp
@@ -113,6 +113,13 @@ static raw_ostream &operator<<(raw_ostream &OS,
return OS;
}
+static raw_ostream &operator<<(raw_ostream &OS,
+ const llvm::dxbc::StaticSamplerFlags &Flags) {
+ printFlags(OS, Flags, dxbc::getStaticSamplerFlags());
+
+ return OS;
+}
+
raw_ostream &operator<<(raw_ostream &OS, const dxbc::RootFlags &Flags) {
OS << "RootFlags(";
printFlags(OS, Flags, dxbc::getRootFlags());
@@ -172,7 +179,7 @@ raw_ostream &operator<<(raw_ostream &OS, const StaticSampler &Sampler) {
<< ", borderColor = " << Sampler.BorderColor
<< ", minLOD = " << Sampler.MinLOD << ", maxLOD = " << Sampler.MaxLOD
<< ", space = " << Sampler.Space << ", visibility = " << Sampler.Visibility
- << ")";
+ << ", flags = " << Sampler.Flags << ")";
return OS;
}
diff --git a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
index 5785505..7a0cf40 100644
--- a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
+++ b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
@@ -218,6 +218,7 @@ MDNode *MetadataBuilder::BuildStaticSampler(const StaticSampler &Sampler) {
ConstantAsMetadata::get(Builder.getInt32(Sampler.Space)),
ConstantAsMetadata::get(
Builder.getInt32(to_underlying(Sampler.Visibility))),
+ ConstantAsMetadata::get(Builder.getInt32(to_underlying(Sampler.Flags))),
};
return MDNode::get(Ctx, Operands);
}
@@ -417,7 +418,7 @@ Error MetadataParser::parseDescriptorTable(mcdxbc::RootSignatureDesc &RSD,
Error MetadataParser::parseStaticSampler(mcdxbc::RootSignatureDesc &RSD,
MDNode *StaticSamplerNode) {
- if (StaticSamplerNode->getNumOperands() != 14)
+ if (StaticSamplerNode->getNumOperands() != 15)
return make_error<InvalidRSMetadataFormat>("Static Sampler");
mcdxbc::StaticSampler Sampler;
@@ -501,6 +502,17 @@ Error MetadataParser::parseStaticSampler(mcdxbc::RootSignatureDesc &RSD,
return Error(std::move(E));
Sampler.ShaderVisibility = *Visibility;
+ if (RSD.Version < 3) {
+ RSD.StaticSamplers.push_back(Sampler);
+ return Error::success();
+ }
+ assert(RSD.Version >= 3);
+
+ if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 14))
+ Sampler.Flags = *Val;
+ else
+ return make_error<InvalidRSMetadataValue>("Static Sampler Flags");
+
RSD.StaticSamplers.push_back(Sampler);
return Error::success();
}
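The version gate above encodes the v3 format change: the static-sampler metadata node now always carries 15 operands, but the flags word in operand 14 is only honored for root signature version 3 and later. A minimal sketch with hypothetical stand-in types (not the real MDNode API):

#include <cstdint>
#include <optional>
#include <vector>

struct SamplerNode {
  std::vector<uint32_t> Operands; // stand-in for the metadata operands
};

// Returns the effective flags, or nullopt for a malformed node.
std::optional<uint32_t> parseSamplerFlags(uint32_t Version,
                                          const SamplerNode &N) {
  if (N.Operands.size() != 15)
    return std::nullopt; // node shape is validated for every version
  if (Version < 3)
    return 0; // pre-v3 signatures ignore the flags operand
  return N.Operands[14]; // v3+: operand 14 is the flags word
}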
diff --git a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp
index 2c78d62..8a2b03d 100644
--- a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp
+++ b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp
@@ -40,7 +40,7 @@ bool verifyRootDescriptorFlag(uint32_t Version, uint32_t FlagsVal) {
if (Version == 1)
return Flags == FlagT::DataVolatile;
- assert(Version == 2 && "Provided invalid root signature version");
+ assert((Version <= 3) && "Provided invalid root signature version");
// The data-specific flags are mutually exclusive.
FlagT DataFlags = FlagT::DataVolatile | FlagT::DataStatic |
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 54b92c9..245129f 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -127,7 +127,7 @@ static void orderValue(const Value *V, OrderMap &OM) {
if (OM.lookup(V))
return;
- if (const Constant *C = dyn_cast<Constant>(V)) {
+ if (const auto *C = dyn_cast<Constant>(V)) {
if (isa<ConstantData>(C))
return;
@@ -146,17 +146,17 @@ static void orderValue(const Value *V, OrderMap &OM) {
static OrderMap orderModule(const Module *M) {
OrderMap OM;
- auto orderConstantValue = [&OM](const Value *V) {
+ auto OrderConstantValue = [&OM](const Value *V) {
if (isa<Constant>(V) || isa<InlineAsm>(V))
orderValue(V, OM);
};
auto OrderConstantFromMetadata = [&](Metadata *MD) {
if (const auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
- orderConstantValue(VAM->getValue());
+ OrderConstantValue(VAM->getValue());
} else if (const auto *AL = dyn_cast<DIArgList>(MD)) {
for (const auto *VAM : AL->getArgs())
- orderConstantValue(VAM->getValue());
+ OrderConstantValue(VAM->getValue());
}
};
@@ -302,18 +302,18 @@ static UseListOrderMap predictUseListOrder(const Module *M) {
}
static const Module *getModuleFromVal(const Value *V) {
- if (const Argument *MA = dyn_cast<Argument>(V))
+ if (const auto *MA = dyn_cast<Argument>(V))
return MA->getParent() ? MA->getParent()->getParent() : nullptr;
- if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+ if (const auto *BB = dyn_cast<BasicBlock>(V))
return BB->getParent() ? BB->getParent()->getParent() : nullptr;
- if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ if (const auto *I = dyn_cast<Instruction>(V)) {
const Function *M = I->getParent() ? I->getParent()->getParent() : nullptr;
return M ? M->getParent() : nullptr;
}
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+ if (const auto *GV = dyn_cast<GlobalValue>(V))
return GV->getParent();
if (const auto *MAV = dyn_cast<MetadataAsValue>(V)) {
@@ -337,7 +337,7 @@ static const Module *getModuleFromDPI(const DbgRecord *DR) {
return DR->getMarker() ? getModuleFromDPI(DR->getMarker()) : nullptr;
}
-static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
+static void printCallingConv(unsigned cc, raw_ostream &Out) {
switch (cc) {
default: Out << "cc" << cc; break;
case CallingConv::Fast: Out << "fastcc"; break;
@@ -484,7 +484,7 @@ void llvm::printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name) {
/// Turn the specified name into an 'LLVM name', which is either prefixed with %
/// (if the string only contains simple characters) or is surrounded with ""'s
/// (if it has special chars in it). Print it out.
-static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
+static void printLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
switch (Prefix) {
case NoPrefix:
break;
@@ -506,12 +506,12 @@ static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
/// Turn the specified name into an 'LLVM name', which is either prefixed with %
/// (if the string only contains simple characters) or is surrounded with ""'s
/// (if it has special chars in it). Print it out.
-static void PrintLLVMName(raw_ostream &OS, const Value *V) {
- PrintLLVMName(OS, V->getName(),
+static void printLLVMName(raw_ostream &OS, const Value *V) {
+ printLLVMName(OS, V->getName(),
isa<GlobalValue>(V) ? GlobalPrefix : LocalPrefix);
}
-static void PrintShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef<int> Mask) {
+static void printShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef<int> Mask) {
Out << ", <";
if (isa<ScalableVectorType>(Ty))
Out << "vscale x ";
@@ -668,7 +668,7 @@ void TypePrinting::print(Type *Ty, raw_ostream &OS) {
return printStructBody(STy, OS);
if (!STy->getName().empty())
- return PrintLLVMName(OS, STy->getName(), LocalPrefix);
+ return printLLVMName(OS, STy->getName(), LocalPrefix);
incorporateTypes();
const auto I = Type2Number.find(STy);
@@ -999,26 +999,26 @@ void ModuleSlotTracker::setProcessHook(
}
static SlotTracker *createSlotTracker(const Value *V) {
- if (const Argument *FA = dyn_cast<Argument>(V))
+ if (const auto *FA = dyn_cast<Argument>(V))
return new SlotTracker(FA->getParent());
- if (const Instruction *I = dyn_cast<Instruction>(V))
+ if (const auto *I = dyn_cast<Instruction>(V))
if (I->getParent())
return new SlotTracker(I->getParent()->getParent());
- if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+ if (const auto *BB = dyn_cast<BasicBlock>(V))
return new SlotTracker(BB->getParent());
- if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ if (const auto *GV = dyn_cast<GlobalVariable>(V))
return new SlotTracker(GV->getParent());
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+ if (const auto *GA = dyn_cast<GlobalAlias>(V))
return new SlotTracker(GA->getParent());
- if (const GlobalIFunc *GIF = dyn_cast<GlobalIFunc>(V))
+ if (const auto *GIF = dyn_cast<GlobalIFunc>(V))
return new SlotTracker(GIF->getParent());
- if (const Function *Func = dyn_cast<Function>(V))
+ if (const auto *Func = dyn_cast<Function>(V))
return new SlotTracker(Func);
return nullptr;
@@ -1218,7 +1218,7 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) {
// but we can have faulty metadata from debug-intrinsic days being
// autoupgraded into debug records. This gets caught by the verifier, which
// then will print the faulty IR, hitting this code path.
- if (const DbgVariableRecord *DVR = dyn_cast<const DbgVariableRecord>(&DR)) {
+ if (const auto *DVR = dyn_cast<const DbgVariableRecord>(&DR)) {
// Process metadata used by DbgRecords; we only specifically care about the
// DILocalVariable, DILocation, and DIAssignID fields, as the Value and
// Expression fields should only be printed inline and so do not use a slot.
@@ -1233,7 +1233,7 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) {
if (auto *Empty = dyn_cast_if_present<MDNode>(DVR->getRawAddress()))
CreateMetadataSlot(Empty);
}
- } else if (const DbgLabelRecord *DLR = dyn_cast<const DbgLabelRecord>(&DR)) {
+ } else if (const auto *DLR = dyn_cast<const DbgLabelRecord>(&DR)) {
CreateMetadataSlot(DLR->getRawLabel());
} else {
llvm_unreachable("unsupported DbgRecord kind");
@@ -1244,12 +1244,12 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) {
void SlotTracker::processInstructionMetadata(const Instruction &I) {
// Process metadata used directly by intrinsics.
- if (const CallInst *CI = dyn_cast<CallInst>(&I))
+ if (const auto *CI = dyn_cast<CallInst>(&I))
if (Function *F = CI->getCalledFunction())
if (F->isIntrinsic())
for (auto &Op : I.operands())
if (auto *V = dyn_cast_or_null<MetadataAsValue>(Op))
- if (MDNode *N = dyn_cast<MDNode>(V->getMetadata()))
+ if (auto *N = dyn_cast<MDNode>(V->getMetadata()))
CreateMetadataSlot(N);
// Process metadata attached to this instruction.
@@ -1406,7 +1406,7 @@ void SlotTracker::CreateMetadataSlot(const MDNode *N) {
// Recursively add any MDNodes referenced by operands.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- if (const MDNode *Op = dyn_cast_or_null<MDNode>(N->getOperand(i)))
+ if (const auto *Op = dyn_cast_or_null<MDNode>(N->getOperand(i)))
CreateMetadataSlot(Op);
}
@@ -1464,32 +1464,30 @@ struct AsmWriterContext {
// AsmWriter Implementation
//===----------------------------------------------------------------------===//
-static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
- AsmWriterContext &WriterCtx);
+static void writeAsOperandInternal(raw_ostream &Out, const Value *V,
+ AsmWriterContext &WriterCtx,
+ bool PrintType = false);
-static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
+static void writeAsOperandInternal(raw_ostream &Out, const Metadata *MD,
AsmWriterContext &WriterCtx,
bool FromValue = false);
-static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
- if (const FPMathOperator *FPO = dyn_cast<const FPMathOperator>(U))
+static void writeOptimizationInfo(raw_ostream &Out, const User *U) {
+ if (const auto *FPO = dyn_cast<const FPMathOperator>(U))
Out << FPO->getFastMathFlags();
- if (const OverflowingBinaryOperator *OBO =
- dyn_cast<OverflowingBinaryOperator>(U)) {
+ if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(U)) {
if (OBO->hasNoUnsignedWrap())
Out << " nuw";
if (OBO->hasNoSignedWrap())
Out << " nsw";
- } else if (const PossiblyExactOperator *Div =
- dyn_cast<PossiblyExactOperator>(U)) {
+ } else if (const auto *Div = dyn_cast<PossiblyExactOperator>(U)) {
if (Div->isExact())
Out << " exact";
- } else if (const PossiblyDisjointInst *PDI =
- dyn_cast<PossiblyDisjointInst>(U)) {
+ } else if (const auto *PDI = dyn_cast<PossiblyDisjointInst>(U)) {
if (PDI->isDisjoint())
Out << " disjoint";
- } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
+ } else if (const auto *GEP = dyn_cast<GEPOperator>(U)) {
if (GEP->isInBounds())
Out << " inbounds";
else if (GEP->hasNoUnsignedSignedWrap())
@@ -1514,7 +1512,7 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
}
}
-static void WriteAPFloatInternal(raw_ostream &Out, const APFloat &APF) {
+static void writeAPFloatInternal(raw_ostream &Out, const APFloat &APF) {
if (&APF.getSemantics() == &APFloat::IEEEsingle() ||
&APF.getSemantics() == &APFloat::IEEEdouble()) {
// We would like to output the FP constant value in exponential notation,
@@ -1607,9 +1605,9 @@ static void WriteAPFloatInternal(raw_ostream &Out, const APFloat &APF) {
llvm_unreachable("Unsupported floating point type");
}
-static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
+static void writeConstantInternal(raw_ostream &Out, const Constant *CV,
AsmWriterContext &WriterCtx) {
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
+ if (const auto *CI = dyn_cast<ConstantInt>(CV)) {
Type *Ty = CI->getType();
if (Ty->isVectorTy()) {
@@ -1629,7 +1627,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
return;
}
- if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
+ if (const auto *CFP = dyn_cast<ConstantFP>(CV)) {
Type *Ty = CFP->getType();
if (Ty->isVectorTy()) {
@@ -1638,7 +1636,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
Out << " ";
}
- WriteAPFloatInternal(Out, CFP->getValueAPF());
+ writeAPFloatInternal(Out, CFP->getValueAPF());
if (Ty->isVectorTy())
Out << ")";
@@ -1651,28 +1649,28 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
return;
}
- if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
+ if (const auto *BA = dyn_cast<BlockAddress>(CV)) {
Out << "blockaddress(";
- WriteAsOperandInternal(Out, BA->getFunction(), WriterCtx);
+ writeAsOperandInternal(Out, BA->getFunction(), WriterCtx);
Out << ", ";
- WriteAsOperandInternal(Out, BA->getBasicBlock(), WriterCtx);
+ writeAsOperandInternal(Out, BA->getBasicBlock(), WriterCtx);
Out << ")";
return;
}
if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(CV)) {
Out << "dso_local_equivalent ";
- WriteAsOperandInternal(Out, Equiv->getGlobalValue(), WriterCtx);
+ writeAsOperandInternal(Out, Equiv->getGlobalValue(), WriterCtx);
return;
}
if (const auto *NC = dyn_cast<NoCFIValue>(CV)) {
Out << "no_cfi ";
- WriteAsOperandInternal(Out, NC->getGlobalValue(), WriterCtx);
+ writeAsOperandInternal(Out, NC->getGlobalValue(), WriterCtx);
return;
}
- if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(CV)) {
+ if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV)) {
Out << "ptrauth (";
// ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC]?]?)
@@ -1685,29 +1683,25 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
ListSeparator LS;
for (unsigned i = 0, e = NumOpsToWrite; i != e; ++i) {
Out << LS;
- WriterCtx.TypePrinter->print(CPA->getOperand(i)->getType(), Out);
- Out << ' ';
- WriteAsOperandInternal(Out, CPA->getOperand(i), WriterCtx);
+ writeAsOperandInternal(Out, CPA->getOperand(i), WriterCtx,
+ /*PrintType=*/true);
}
Out << ')';
return;
}
- if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
- Type *ETy = CA->getType()->getElementType();
+ if (const auto *CA = dyn_cast<ConstantArray>(CV)) {
Out << '[';
ListSeparator LS;
for (const Value *Op : CA->operands()) {
Out << LS;
- WriterCtx.TypePrinter->print(ETy, Out);
- Out << ' ';
- WriteAsOperandInternal(Out, Op, WriterCtx);
+ writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true);
}
Out << ']';
return;
}
- if (const ConstantDataArray *CA = dyn_cast<ConstantDataArray>(CV)) {
+ if (const auto *CA = dyn_cast<ConstantDataArray>(CV)) {
// As a special case, print the array as a string if it is an array of
// i8 with ConstantInt values.
if (CA->isString()) {
@@ -1717,20 +1711,18 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
return;
}
- Type *ETy = CA->getType()->getElementType();
Out << '[';
ListSeparator LS;
for (uint64_t i = 0, e = CA->getNumElements(); i != e; ++i) {
Out << LS;
- WriterCtx.TypePrinter->print(ETy, Out);
- Out << ' ';
- WriteAsOperandInternal(Out, CA->getElementAsConstant(i), WriterCtx);
+ writeAsOperandInternal(Out, CA->getElementAsConstant(i), WriterCtx,
+ /*PrintType=*/true);
}
Out << ']';
return;
}
- if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
+ if (const auto *CS = dyn_cast<ConstantStruct>(CV)) {
if (CS->getType()->isPacked())
Out << '<';
Out << '{';
@@ -1739,9 +1731,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
ListSeparator LS;
for (const Value *Op : CS->operands()) {
Out << LS;
- WriterCtx.TypePrinter->print(Op->getType(), Out);
- Out << ' ';
- WriteAsOperandInternal(Out, Op, WriterCtx);
+ writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true);
}
Out << ' ';
}
@@ -1753,7 +1743,6 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
if (isa<ConstantVector>(CV) || isa<ConstantDataVector>(CV)) {
auto *CVVTy = cast<FixedVectorType>(CV->getType());
- Type *ETy = CVVTy->getElementType();
// Use the same shorthand for splat vector (i.e. "splat(Ty val)") as is
// permitted on IR input to reduce the output changes when enabling
@@ -1763,9 +1752,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
if (auto *SplatVal = CV->getSplatValue()) {
if (isa<ConstantInt>(SplatVal) || isa<ConstantFP>(SplatVal)) {
Out << "splat (";
- WriterCtx.TypePrinter->print(ETy, Out);
- Out << ' ';
- WriteAsOperandInternal(Out, SplatVal, WriterCtx);
+ writeAsOperandInternal(Out, SplatVal, WriterCtx, /*PrintType=*/true);
Out << ')';
return;
}
@@ -1775,9 +1762,8 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
ListSeparator LS;
for (unsigned i = 0, e = CVVTy->getNumElements(); i != e; ++i) {
Out << LS;
- WriterCtx.TypePrinter->print(ETy, Out);
- Out << ' ';
- WriteAsOperandInternal(Out, CV->getAggregateElement(i), WriterCtx);
+ writeAsOperandInternal(Out, CV->getAggregateElement(i), WriterCtx,
+ /*PrintType=*/true);
}
Out << '>';
return;
@@ -1803,7 +1789,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
return;
}
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+ if (const auto *CE = dyn_cast<ConstantExpr>(CV)) {
// Use the same shorthand for splat vector (i.e. "splat(Ty val)") as is
// permitted on IR input to reduce the output changes when enabling
// UseConstant{Int,FP}ForScalableSplat.
@@ -1813,9 +1799,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
if (auto *SplatVal = CE->getSplatValue()) {
if (isa<ConstantInt>(SplatVal) || isa<ConstantFP>(SplatVal)) {
Out << "splat (";
- WriterCtx.TypePrinter->print(SplatVal->getType(), Out);
- Out << ' ';
- WriteAsOperandInternal(Out, SplatVal, WriterCtx);
+ writeAsOperandInternal(Out, SplatVal, WriterCtx, /*PrintType=*/true);
Out << ')';
return;
}
@@ -1823,10 +1807,10 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
}
Out << CE->getOpcodeName();
- WriteOptimizationInfo(Out, CE);
+ writeOptimizationInfo(Out, CE);
Out << " (";
- if (const GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
+ if (const auto *GEP = dyn_cast<GEPOperator>(CE)) {
WriterCtx.TypePrinter->print(GEP->getSourceElementType(), Out);
Out << ", ";
}
@@ -1834,9 +1818,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
ListSeparator LS;
for (const Value *Op : CE->operands()) {
Out << LS;
- WriterCtx.TypePrinter->print(Op->getType(), Out);
- Out << ' ';
- WriteAsOperandInternal(Out, Op, WriterCtx);
+ writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true);
}
if (CE->isCast()) {
@@ -1845,7 +1827,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
}
if (CE->getOpcode() == Instruction::ShuffleVector)
- PrintShuffleMask(Out, CE->getType(), CE->getShuffleMask());
+ printShuffleMask(Out, CE->getType(), CE->getShuffleMask());
Out << ')';
return;
@@ -1864,11 +1846,9 @@ static void writeMDTuple(raw_ostream &Out, const MDTuple *Node,
Out << "null";
} else if (auto *MDV = dyn_cast<ValueAsMetadata>(MD)) {
Value *V = MDV->getValue();
- WriterCtx.TypePrinter->print(V->getType(), Out);
- Out << ' ';
- WriteAsOperandInternal(Out, V, WriterCtx);
+ writeAsOperandInternal(Out, V, WriterCtx, /*PrintType=*/true);
} else {
- WriteAsOperandInternal(Out, MD, WriterCtx);
+ writeAsOperandInternal(Out, MD, WriterCtx);
WriterCtx.onWriteMetadataAsOperand(MD);
}
}
@@ -1956,7 +1936,7 @@ static void writeMetadataAsOperand(raw_ostream &Out, const Metadata *MD,
Out << "null";
return;
}
- WriteAsOperandInternal(Out, MD, WriterCtx);
+ writeAsOperandInternal(Out, MD, WriterCtx);
WriterCtx.onWriteMetadataAsOperand(MD);
}
@@ -2634,9 +2614,9 @@ static void writeDIArgList(raw_ostream &Out, const DIArgList *N,
Out << "!DIArgList(";
ListSeparator FS;
MDFieldPrinter Printer(Out, WriterCtx);
- for (Metadata *Arg : N->getArgs()) {
+ for (const Metadata *Arg : N->getArgs()) {
Out << FS;
- WriteAsOperandInternal(Out, Arg, WriterCtx, true);
+ writeAsOperandInternal(Out, Arg, WriterCtx, true);
}
Out << ")";
}
@@ -2679,7 +2659,7 @@ static void writeDIImportedEntity(raw_ostream &Out, const DIImportedEntity *N,
Out << ")";
}
-static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
+static void writeMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
AsmWriterContext &Ctx) {
if (Node->isDistinct())
Out << "distinct ";
@@ -2699,21 +2679,27 @@ static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
// Full implementation of printing a Value as an operand with support for
// TypePrinting, etc.
-static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
- AsmWriterContext &WriterCtx) {
+static void writeAsOperandInternal(raw_ostream &Out, const Value *V,
+ AsmWriterContext &WriterCtx,
+ bool PrintType) {
+ if (PrintType) {
+ WriterCtx.TypePrinter->print(V->getType(), Out);
+ Out << ' ';
+ }
+
if (V->hasName()) {
- PrintLLVMName(Out, V);
+ printLLVMName(Out, V);
return;
}
- const Constant *CV = dyn_cast<Constant>(V);
+ const auto *CV = dyn_cast<Constant>(V);
if (CV && !isa<GlobalValue>(CV)) {
assert(WriterCtx.TypePrinter && "Constants require TypePrinting!");
- WriteConstantInternal(Out, CV, WriterCtx);
+ writeConstantInternal(Out, CV, WriterCtx);
return;
}
- if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
+ if (const auto *IA = dyn_cast<InlineAsm>(V)) {
Out << "asm ";
if (IA->hasSideEffects())
Out << "sideeffect ";
@@ -2733,7 +2719,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
}
if (auto *MD = dyn_cast<MetadataAsValue>(V)) {
- WriteAsOperandInternal(Out, MD->getMetadata(), WriterCtx,
+ writeAsOperandInternal(Out, MD->getMetadata(), WriterCtx,
/* FromValue */ true);
return;
}
@@ -2743,7 +2729,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
auto *Machine = WriterCtx.Machine;
// If we have a SlotTracker, use it.
if (Machine) {
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (const auto *GV = dyn_cast<GlobalValue>(V)) {
Slot = Machine->getGlobalSlot(GV);
Prefix = '@';
} else {
@@ -2760,7 +2746,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
}
} else if ((Machine = createSlotTracker(V))) {
// Otherwise, create one to get the # and then destroy it.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (const auto *GV = dyn_cast<GlobalValue>(V)) {
Slot = Machine->getGlobalSlot(GV);
Prefix = '@';
} else {
@@ -2778,21 +2764,21 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
Out << "<badref>";
}
-static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
+static void writeAsOperandInternal(raw_ostream &Out, const Metadata *MD,
AsmWriterContext &WriterCtx,
bool FromValue) {
// Write DIExpressions and DIArgLists inline when used as a value. Improves
// readability of debug info intrinsics.
- if (const DIExpression *Expr = dyn_cast<DIExpression>(MD)) {
+ if (const auto *Expr = dyn_cast<DIExpression>(MD)) {
writeDIExpression(Out, Expr, WriterCtx);
return;
}
- if (const DIArgList *ArgList = dyn_cast<DIArgList>(MD)) {
+ if (const auto *ArgList = dyn_cast<DIArgList>(MD)) {
writeDIArgList(Out, ArgList, WriterCtx, FromValue);
return;
}
- if (const MDNode *N = dyn_cast<MDNode>(MD)) {
+ if (const auto *N = dyn_cast<MDNode>(MD)) {
std::unique_ptr<SlotTracker> MachineStorage;
SaveAndRestore SARMachine(WriterCtx.Machine);
if (!WriterCtx.Machine) {
@@ -2801,7 +2787,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
}
int Slot = WriterCtx.Machine->getMetadataSlot(N);
if (Slot == -1) {
- if (const DILocation *Loc = dyn_cast<DILocation>(N)) {
+ if (const auto *Loc = dyn_cast<DILocation>(N)) {
writeDILocation(Out, Loc, WriterCtx);
return;
}
@@ -2813,7 +2799,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
return;
}
- if (const MDString *MDS = dyn_cast<MDString>(MD)) {
+ if (const auto *MDS = dyn_cast<MDString>(MD)) {
Out << "!\"";
printEscapedString(MDS->getString(), Out);
Out << '"';
@@ -2825,9 +2811,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
assert((FromValue || !isa<LocalAsMetadata>(V)) &&
"Unexpected function-local metadata outside of value argument");
- WriterCtx.TypePrinter->print(V->getValue()->getType(), Out);
- Out << ' ';
- WriteAsOperandInternal(Out, V->getValue(), WriterCtx);
+ writeAsOperandInternal(Out, V->getValue(), WriterCtx, /*PrintType=*/true);
}
namespace {
@@ -2902,7 +2886,7 @@ public:
void printDbgRecord(const DbgRecord &DR);
void printDbgRecordLine(const DbgRecord &DR);
- void printUseListOrder(const Value *V, const std::vector<unsigned> &Shuffle);
+ void printUseListOrder(const Value *V, ArrayRef<unsigned> Shuffle);
void printUseLists(const Function *F);
void printModuleSummaryIndex();
@@ -2914,16 +2898,14 @@ public:
void printTypeIdSummary(const TypeIdSummary &TIS);
void printTypeIdCompatibleVtableSummary(const TypeIdCompatibleVtableInfo &TI);
void printTypeTestResolution(const TypeTestResolution &TTRes);
- void printArgs(const std::vector<uint64_t> &Args);
+ void printArgs(ArrayRef<uint64_t> Args);
void printWPDRes(const WholeProgramDevirtResolution &WPDRes);
void printTypeIdInfo(const FunctionSummary::TypeIdInfo &TIDInfo);
void printVFuncId(const FunctionSummary::VFuncId VFId);
- void
- printNonConstVCalls(const std::vector<FunctionSummary::VFuncId> &VCallList,
- const char *Tag);
- void
- printConstVCalls(const std::vector<FunctionSummary::ConstVCall> &VCallList,
- const char *Tag);
+ void printNonConstVCalls(ArrayRef<FunctionSummary::VFuncId> VCallList,
+ const char *Tag);
+ void printConstVCalls(ArrayRef<FunctionSummary::ConstVCall> VCallList,
+ const char *Tag);
private:
/// Print out metadata attachments.
@@ -2965,12 +2947,8 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
Out << "<null operand!>";
return;
}
- if (PrintType) {
- TypePrinter.print(Operand->getType(), Out);
- Out << ' ';
- }
- auto WriterCtx = getContext();
- WriteAsOperandInternal(Out, Operand, WriterCtx);
+ auto WriterCtx = getContext();
+ writeAsOperandInternal(Out, Operand, WriterCtx, PrintType);
}
void AssemblyWriter::writeSyncScope(const LLVMContext &Context,
@@ -3030,7 +3008,7 @@ void AssemblyWriter::writeParamOperand(const Value *Operand,
Out << ' ';
// Print the operand
auto WriterCtx = getContext();
- WriteAsOperandInternal(Out, Operand, WriterCtx);
+ writeAsOperandInternal(Out, Operand, WriterCtx);
}
void AssemblyWriter::writeOperandBundles(const CallBase *Call) {
@@ -3049,20 +3027,14 @@ void AssemblyWriter::writeOperandBundles(const CallBase *Call) {
Out << '(';
- bool FirstInput = true;
+ ListSeparator InnerLS;
auto WriterCtx = getContext();
for (const auto &Input : BU.Inputs) {
- if (!FirstInput)
- Out << ", ";
- FirstInput = false;
-
+ Out << InnerLS;
if (Input == nullptr)
Out << "<null operand bundle!>";
- else {
- TypePrinter.print(Input->getType(), Out);
- Out << " ";
- WriteAsOperandInternal(Out, Input, WriterCtx);
- }
+ else
+ writeAsOperandInternal(Out, Input, WriterCtx, /*PrintType=*/true);
}
Out << ')';
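llvm::ListSeparator, adopted above, is the STLExtras helper for exactly this pattern: it streams nothing on its first use and its separator (", " by default) on every later one, replacing hand-rolled FirstInput flags. A minimal usage sketch:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"

void printParenList(llvm::ArrayRef<int> Vals) {
  llvm::outs() << '(';
  llvm::ListSeparator LS; // first use prints "", later uses print ", "
  for (int V : Vals)
    llvm::outs() << LS << V;
  llvm::outs() << ")\n"; // {1, 2, 3} -> "(1, 2, 3)"
}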
@@ -3334,14 +3306,8 @@ void AssemblyWriter::printTypeIdCompatibleVtableSummary(
Out << ")";
}
-void AssemblyWriter::printArgs(const std::vector<uint64_t> &Args) {
- Out << "args: (";
- ListSeparator FS;
- for (auto arg : Args) {
- Out << FS;
- Out << arg;
- }
- Out << ")";
+void AssemblyWriter::printArgs(ArrayRef<uint64_t> Args) {
+ Out << "args: (" << llvm::interleaved(Args) << ')';
}
void AssemblyWriter::printWPDRes(const WholeProgramDevirtResolution &WPDRes) {
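llvm::interleaved, used in the new printArgs, goes one step further than ListSeparator: streaming the wrapper prints the whole range joined by ", " (the default separator), so the loop disappears entirely. A short usage sketch:

#include "llvm/Support/InterleavedRange.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <vector>

void printArgsDemo(const std::vector<uint64_t> &Args) {
  // {1, 2, 3} prints as "args: (1, 2, 3)"
  llvm::outs() << "args: (" << llvm::interleaved(Args) << ")\n";
}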
@@ -3681,7 +3647,7 @@ void AssemblyWriter::printVFuncId(const FunctionSummary::VFuncId VFId) {
}
void AssemblyWriter::printNonConstVCalls(
- const std::vector<FunctionSummary::VFuncId> &VCallList, const char *Tag) {
+ ArrayRef<FunctionSummary::VFuncId> VCallList, const char *Tag) {
Out << Tag << ": (";
ListSeparator FS;
for (auto &VFuncId : VCallList) {
@@ -3692,8 +3658,7 @@ void AssemblyWriter::printNonConstVCalls(
}
void AssemblyWriter::printConstVCalls(
- const std::vector<FunctionSummary::ConstVCall> &VCallList,
- const char *Tag) {
+ ArrayRef<FunctionSummary::ConstVCall> VCallList, const char *Tag) {
Out << Tag << ": (";
ListSeparator FS;
for (auto &ConstVCall : VCallList) {
@@ -3816,7 +3781,7 @@ void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) {
Out << "}\n";
}
-static void PrintVisibility(GlobalValue::VisibilityTypes Vis,
+static void printVisibility(GlobalValue::VisibilityTypes Vis,
formatted_raw_ostream &Out) {
switch (Vis) {
case GlobalValue::DefaultVisibility: break;
@@ -3825,13 +3790,13 @@ static void PrintVisibility(GlobalValue::VisibilityTypes Vis,
}
}
-static void PrintDSOLocation(const GlobalValue &GV,
+static void printDSOLocation(const GlobalValue &GV,
formatted_raw_ostream &Out) {
if (GV.isDSOLocal() && !GV.isImplicitDSOLocal())
Out << "dso_local ";
}
-static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT,
+static void printDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT,
formatted_raw_ostream &Out) {
switch (SCT) {
case GlobalValue::DefaultStorageClass: break;
@@ -3840,7 +3805,7 @@ static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT,
}
}
-static void PrintThreadLocalModel(GlobalVariable::ThreadLocalMode TLM,
+static void printThreadLocalModel(GlobalVariable::ThreadLocalMode TLM,
formatted_raw_ostream &Out) {
switch (TLM) {
case GlobalVariable::NotThreadLocal:
@@ -3886,7 +3851,7 @@ static void maybePrintComdat(formatted_raw_ostream &Out,
return;
Out << '(';
- PrintLLVMName(Out, C->getName(), ComdatPrefix);
+ printLLVMName(Out, C->getName(), ComdatPrefix);
Out << ')';
}
@@ -3895,17 +3860,17 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
Out << "; Materializable\n";
AsmWriterContext WriterCtx(&TypePrinter, &Machine, GV->getParent());
- WriteAsOperandInternal(Out, GV, WriterCtx);
+ writeAsOperandInternal(Out, GV, WriterCtx);
Out << " = ";
if (!GV->hasInitializer() && GV->hasExternalLinkage())
Out << "external ";
Out << getLinkageNameWithSpace(GV->getLinkage());
- PrintDSOLocation(*GV, Out);
- PrintVisibility(GV->getVisibility(), Out);
- PrintDLLStorageClass(GV->getDLLStorageClass(), Out);
- PrintThreadLocalModel(GV->getThreadLocalMode(), Out);
+ printDSOLocation(*GV, Out);
+ printVisibility(GV->getVisibility(), Out);
+ printDLLStorageClass(GV->getDLLStorageClass(), Out);
+ printThreadLocalModel(GV->getThreadLocalMode(), Out);
StringRef UA = getUnnamedAddrEncoding(GV->getUnnamedAddr());
if (!UA.empty())
Out << UA << ' ';
@@ -3986,14 +3951,14 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
Out << "; Materializable\n";
AsmWriterContext WriterCtx(&TypePrinter, &Machine, GA->getParent());
- WriteAsOperandInternal(Out, GA, WriterCtx);
+ writeAsOperandInternal(Out, GA, WriterCtx);
Out << " = ";
Out << getLinkageNameWithSpace(GA->getLinkage());
- PrintDSOLocation(*GA, Out);
- PrintVisibility(GA->getVisibility(), Out);
- PrintDLLStorageClass(GA->getDLLStorageClass(), Out);
- PrintThreadLocalModel(GA->getThreadLocalMode(), Out);
+ printDSOLocation(*GA, Out);
+ printVisibility(GA->getVisibility(), Out);
+ printDLLStorageClass(GA->getDLLStorageClass(), Out);
+ printThreadLocalModel(GA->getThreadLocalMode(), Out);
StringRef UA = getUnnamedAddrEncoding(GA->getUnnamedAddr());
if (!UA.empty())
Out << UA << ' ';
@@ -4025,12 +3990,12 @@ void AssemblyWriter::printIFunc(const GlobalIFunc *GI) {
Out << "; Materializable\n";
AsmWriterContext WriterCtx(&TypePrinter, &Machine, GI->getParent());
- WriteAsOperandInternal(Out, GI, WriterCtx);
+ writeAsOperandInternal(Out, GI, WriterCtx);
Out << " = ";
Out << getLinkageNameWithSpace(GI->getLinkage());
- PrintDSOLocation(*GI, Out);
- PrintVisibility(GI->getVisibility(), Out);
+ printDSOLocation(*GI, Out);
+ printVisibility(GI->getVisibility(), Out);
Out << "ifunc ";
@@ -4082,7 +4047,7 @@ void AssemblyWriter::printTypeIdentities() {
auto &NamedTypes = TypePrinter.getNamedTypes();
for (StructType *NamedType : NamedTypes) {
- PrintLLVMName(Out, NamedType->getName(), LocalPrefix);
+ printLLVMName(Out, NamedType->getName(), LocalPrefix);
Out << " = type ";
// Make sure we print out at least one level of the type structure, so
@@ -4130,13 +4095,13 @@ void AssemblyWriter::printFunction(const Function *F) {
Out << "define ";
Out << getLinkageNameWithSpace(F->getLinkage());
- PrintDSOLocation(*F, Out);
- PrintVisibility(F->getVisibility(), Out);
- PrintDLLStorageClass(F->getDLLStorageClass(), Out);
+ printDSOLocation(*F, Out);
+ printVisibility(F->getVisibility(), Out);
+ printDLLStorageClass(F->getDLLStorageClass(), Out);
// Print the calling convention.
if (F->getCallingConv() != CallingConv::C) {
- PrintCallingConv(F->getCallingConv(), Out);
+ printCallingConv(F->getCallingConv(), Out);
Out << " ";
}
@@ -4146,7 +4111,7 @@ void AssemblyWriter::printFunction(const Function *F) {
TypePrinter.print(F->getReturnType(), Out);
AsmWriterContext WriterCtx(&TypePrinter, &Machine, F->getParent());
Out << ' ';
- WriteAsOperandInternal(Out, F, WriterCtx);
+ writeAsOperandInternal(Out, F, WriterCtx);
Out << '(';
// Loop over the arguments, printing them...
@@ -4262,7 +4227,7 @@ void AssemblyWriter::printArgument(const Argument *Arg, AttributeSet Attrs) {
// Output name, if available...
if (Arg->hasName()) {
Out << ' ';
- PrintLLVMName(Out, Arg);
+ printLLVMName(Out, Arg);
} else {
int Slot = Machine.getLocalSlot(Arg);
assert(Slot != -1 && "expect argument in function here");
@@ -4275,7 +4240,7 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) {
bool IsEntryBlock = BB->getParent() && BB->isEntryBlock();
if (BB->hasName()) { // Print out the label if it exists...
Out << "\n";
- PrintLLVMName(Out, BB->getName(), LabelPrefix);
+ printLLVMName(Out, BB->getName(), LabelPrefix);
Out << ':';
} else if (!IsEntryBlock) {
Out << "\n";
@@ -4393,7 +4358,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print out name if it exists...
if (I.hasName()) {
- PrintLLVMName(Out, &I);
+ printLLVMName(Out, &I);
Out << " = ";
} else if (!I.getType()->isVoidTy()) {
// Print out the def slot taken.
@@ -4404,7 +4369,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << '%' << SlotNum << " = ";
}
- if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ if (const auto *CI = dyn_cast<CallInst>(&I)) {
if (CI->isMustTailCall())
Out << "musttail ";
else if (CI->isTailCall())
@@ -4432,14 +4397,14 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << " volatile";
// Print out optimization information.
- WriteOptimizationInfo(Out, &I);
+ writeOptimizationInfo(Out, &I);
// Print out the compare instruction predicates
- if (const CmpInst *CI = dyn_cast<CmpInst>(&I))
+ if (const auto *CI = dyn_cast<CmpInst>(&I))
Out << ' ' << CI->getPredicate();
// Print out the atomicrmw operation
- if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
+ if (const auto *RMWI = dyn_cast<AtomicRMWInst>(&I))
Out << ' ' << AtomicRMWInst::getOperationName(RMWI->getOperation());
// Print out the type of the operands...
@@ -4482,29 +4447,32 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(I.getOperand(i), true);
}
Out << ']';
- } else if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+ } else if (const auto *PN = dyn_cast<PHINode>(&I)) {
Out << ' ';
TypePrinter.print(I.getType(), Out);
Out << ' ';
ListSeparator LS;
- for (unsigned op = 0, Eop = PN->getNumIncomingValues(); op < Eop; ++op) {
+ for (const auto &[V, Block] :
+ zip_equal(PN->incoming_values(), PN->blocks())) {
Out << LS << "[ ";
- writeOperand(PN->getIncomingValue(op), false); Out << ", ";
- writeOperand(PN->getIncomingBlock(op), false); Out << " ]";
+ writeOperand(V, false);
+ Out << ", ";
+ writeOperand(Block, false);
+ Out << " ]";
}
- } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&I)) {
+ } else if (const auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
Out << ' ';
writeOperand(I.getOperand(0), true);
- for (unsigned i : EVI->indices())
- Out << ", " << i;
- } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&I)) {
+ Out << ", ";
+ Out << llvm::interleaved(EVI->indices());
+ } else if (const auto *IVI = dyn_cast<InsertValueInst>(&I)) {
Out << ' ';
writeOperand(I.getOperand(0), true); Out << ", ";
writeOperand(I.getOperand(1), true);
- for (unsigned i : IVI->indices())
- Out << ", " << i;
- } else if (const LandingPadInst *LPI = dyn_cast<LandingPadInst>(&I)) {
+ Out << ", ";
+ Out << llvm::interleaved(IVI->indices());
+ } else if (const auto *LPI = dyn_cast<LandingPadInst>(&I)) {
Out << ' ';
TypePrinter.print(I.getType(), Out);
if (LPI->isCleanup() || LPI->getNumClauses() != 0)
@@ -4563,11 +4531,11 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(CRI->getOperand(1), /*PrintType=*/true);
else
Out << "to caller";
- } else if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ } else if (const auto *CI = dyn_cast<CallInst>(&I)) {
// Print the calling convention being used.
if (CI->getCallingConv() != CallingConv::C) {
Out << " ";
- PrintCallingConv(CI->getCallingConv(), Out);
+ printCallingConv(CI->getCallingConv(), Out);
}
Operand = CI->getCalledOperand();
@@ -4610,7 +4578,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttrs());
writeOperandBundles(CI);
- } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
+ } else if (const auto *II = dyn_cast<InvokeInst>(&I)) {
Operand = II->getCalledOperand();
FunctionType *FTy = II->getFunctionType();
Type *RetTy = FTy->getReturnType();
@@ -4619,7 +4587,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print the calling convention being used.
if (II->getCallingConv() != CallingConv::C) {
Out << " ";
- PrintCallingConv(II->getCallingConv(), Out);
+ printCallingConv(II->getCallingConv(), Out);
}
if (PAL.hasRetAttrs())
@@ -4653,7 +4621,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(II->getNormalDest(), true);
Out << " unwind ";
writeOperand(II->getUnwindDest(), true);
- } else if (const CallBrInst *CBI = dyn_cast<CallBrInst>(&I)) {
+ } else if (const auto *CBI = dyn_cast<CallBrInst>(&I)) {
Operand = CBI->getCalledOperand();
FunctionType *FTy = CBI->getFunctionType();
Type *RetTy = FTy->getReturnType();
@@ -4662,7 +4630,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print the calling convention being used.
if (CBI->getCallingConv() != CallingConv::C) {
Out << " ";
- PrintCallingConv(CBI->getCallingConv(), Out);
+ printCallingConv(CBI->getCallingConv(), Out);
}
if (PAL.hasRetAttrs())
@@ -4698,7 +4666,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(Dest, true);
}
Out << ']';
- } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
+ } else if (const auto *AI = dyn_cast<AllocaInst>(&I)) {
Out << ' ';
if (AI->isUsedWithInAlloca())
Out << "inalloca ";
@@ -4720,9 +4688,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
unsigned AddrSpace = AI->getAddressSpace();
- if (AddrSpace != 0) {
+ if (AddrSpace != 0)
Out << ", addrspace(" << AddrSpace << ')';
- }
} else if (isa<CastInst>(I)) {
if (Operand) {
Out << ' ';
@@ -4737,7 +4704,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
Out << ", ";
TypePrinter.print(I.getType(), Out);
- } else if (Operand) { // Print the normal way.
+ } else if (Operand) { // Print the normal way.
if (const auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
Out << ' ';
TypePrinter.print(GEP->getSourceElementType(), Out);
@@ -4786,28 +4753,28 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
// Print atomic ordering/alignment for memory operations
- if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+ if (const auto *LI = dyn_cast<LoadInst>(&I)) {
if (LI->isAtomic())
writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID());
if (MaybeAlign A = LI->getAlign())
Out << ", align " << A->value();
- } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+ } else if (const auto *SI = dyn_cast<StoreInst>(&I)) {
if (SI->isAtomic())
writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID());
if (MaybeAlign A = SI->getAlign())
Out << ", align " << A->value();
- } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ } else if (const auto *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(),
CXI->getFailureOrdering(), CXI->getSyncScopeID());
Out << ", align " << CXI->getAlign().value();
- } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
+ } else if (const auto *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
writeAtomic(RMWI->getContext(), RMWI->getOrdering(),
RMWI->getSyncScopeID());
Out << ", align " << RMWI->getAlign().value();
- } else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
+ } else if (const auto *FI = dyn_cast<FenceInst>(&I)) {
writeAtomic(FI->getContext(), FI->getOrdering(), FI->getSyncScopeID());
- } else if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(&I)) {
- PrintShuffleMask(Out, SVI->getType(), SVI->getShuffleMask());
+ } else if (const auto *SVI = dyn_cast<ShuffleVectorInst>(&I)) {
+ printShuffleMask(Out, SVI->getType(), SVI->getShuffleMask());
}
// Print Metadata info.
@@ -4863,7 +4830,7 @@ void AssemblyWriter::printDbgVariableRecord(const DbgVariableRecord &DVR) {
if (!M)
Out << "(null)";
else
- WriteAsOperandInternal(Out, M, WriterCtx, true);
+ writeAsOperandInternal(Out, M, WriterCtx, true);
};
Out << "(";
@@ -4897,9 +4864,9 @@ void AssemblyWriter::printDbgRecordLine(const DbgRecord &DR) {
void AssemblyWriter::printDbgLabelRecord(const DbgLabelRecord &Label) {
auto WriterCtx = getContext();
Out << "#dbg_label(";
- WriteAsOperandInternal(Out, Label.getRawLabel(), WriterCtx, true);
+ writeAsOperandInternal(Out, Label.getRawLabel(), WriterCtx, true);
Out << ", ";
- WriteAsOperandInternal(Out, Label.getDebugLoc(), WriterCtx, true);
+ writeAsOperandInternal(Out, Label.getDebugLoc(), WriterCtx, true);
Out << ")";
}
@@ -4922,7 +4889,7 @@ void AssemblyWriter::printMetadataAttachments(
} else
Out << "!<unknown kind #" << Kind << ">";
Out << ' ';
- WriteAsOperandInternal(Out, I.second, WriterCtx);
+ writeAsOperandInternal(Out, I.second, WriterCtx);
}
}
@@ -4945,7 +4912,7 @@ void AssemblyWriter::writeAllMDNodes() {
void AssemblyWriter::printMDNodeBody(const MDNode *Node) {
auto WriterCtx = getContext();
- WriteMDNodeBodyInternal(Out, Node, WriterCtx);
+ writeMDNodeBodyInternal(Out, Node, WriterCtx);
}
void AssemblyWriter::writeAttribute(const Attribute &Attr, bool InAttrGroup) {
@@ -4964,12 +4931,10 @@ void AssemblyWriter::writeAttribute(const Attribute &Attr, bool InAttrGroup) {
void AssemblyWriter::writeAttributeSet(const AttributeSet &AttrSet,
bool InAttrGroup) {
- bool FirstAttr = true;
+ ListSeparator LS(" ");
for (const auto &Attr : AttrSet) {
- if (!FirstAttr)
- Out << ' ';
+ Out << LS;
writeAttribute(Attr, InAttrGroup);
- FirstAttr = false;
}
}
@@ -4986,7 +4951,7 @@ void AssemblyWriter::writeAllAttributeGroups() {
}
void AssemblyWriter::printUseListOrder(const Value *V,
- const std::vector<unsigned> &Shuffle) {
+ ArrayRef<unsigned> Shuffle) {
bool IsInFunction = Machine.getFunction();
if (IsInFunction)
Out << " ";
@@ -5075,7 +5040,7 @@ void NamedMDNode::print(raw_ostream &ROS, ModuleSlotTracker &MST,
}
void Comdat::print(raw_ostream &ROS, bool /*IsForDebug*/) const {
- PrintLLVMName(ROS, getName(), ComdatPrefix);
+ printLLVMName(ROS, getName(), ComdatPrefix);
ROS << " = comdat ";
switch (getSelectionKind()) {
@@ -5107,7 +5072,7 @@ void Type::print(raw_ostream &OS, bool /*IsForDebug*/, bool NoDetails) const {
return;
// If the type is a named struct type, print the body as well.
- if (StructType *STy = dyn_cast<StructType>(const_cast<Type*>(this)))
+ if (auto *STy = dyn_cast<StructType>(const_cast<Type *>(this)))
if (!STy->isLiteral()) {
OS << " = type ";
TP.printStructBody(STy, OS);
@@ -5143,11 +5108,9 @@ void DbgMarker::print(raw_ostream &ROS, ModuleSlotTracker &MST,
SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr));
SlotTracker &SlotTable =
MST.getMachine() ? *MST.getMachine() : EmptySlotTable;
- auto incorporateFunction = [&](const Function *F) {
- if (F)
- MST.incorporateFunction(*F);
- };
- incorporateFunction(getParent() ? getParent()->getParent() : nullptr);
+ const Function *F = getParent() ? getParent()->getParent() : nullptr;
+ if (F)
+ MST.incorporateFunction(*F);
AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug);
W.printDbgMarker(*this);
}
@@ -5164,13 +5127,11 @@ void DbgVariableRecord::print(raw_ostream &ROS, ModuleSlotTracker &MST,
SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr));
SlotTracker &SlotTable =
MST.getMachine() ? *MST.getMachine() : EmptySlotTable;
- auto incorporateFunction = [&](const Function *F) {
- if (F)
- MST.incorporateFunction(*F);
- };
- incorporateFunction(Marker && Marker->getParent()
+ const Function *F = Marker && Marker->getParent()
? Marker->getParent()->getParent()
- : nullptr);
+ : nullptr;
+ if (F)
+ MST.incorporateFunction(*F);
AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug);
W.printDbgVariableRecord(*this);
}
@@ -5181,12 +5142,11 @@ void DbgLabelRecord::print(raw_ostream &ROS, ModuleSlotTracker &MST,
SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr));
SlotTracker &SlotTable =
MST.getMachine() ? *MST.getMachine() : EmptySlotTable;
- auto incorporateFunction = [&](const Function *F) {
- if (F)
- MST.incorporateFunction(*F);
- };
- incorporateFunction(Marker->getParent() ? Marker->getParent()->getParent()
- : nullptr);
+ const Function *F =
+ Marker->getParent() ? Marker->getParent()->getParent() : nullptr;
+ if (F)
+ MST.incorporateFunction(*F);
+
AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug);
W.printDbgLabelRecord(*this);
}
@@ -5208,39 +5168,39 @@ void Value::print(raw_ostream &ROS, ModuleSlotTracker &MST,
SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr));
SlotTracker &SlotTable =
MST.getMachine() ? *MST.getMachine() : EmptySlotTable;
- auto incorporateFunction = [&](const Function *F) {
+ auto IncorporateFunction = [&](const Function *F) {
if (F)
MST.incorporateFunction(*F);
};
- if (const Instruction *I = dyn_cast<Instruction>(this)) {
- incorporateFunction(I->getParent() ? I->getParent()->getParent() : nullptr);
+ if (const auto *I = dyn_cast<Instruction>(this)) {
+ IncorporateFunction(I->getParent() ? I->getParent()->getParent() : nullptr);
AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), nullptr, IsForDebug);
W.printInstruction(*I);
- } else if (const BasicBlock *BB = dyn_cast<BasicBlock>(this)) {
- incorporateFunction(BB->getParent());
+ } else if (const auto *BB = dyn_cast<BasicBlock>(this)) {
+ IncorporateFunction(BB->getParent());
AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), nullptr, IsForDebug);
W.printBasicBlock(BB);
- } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) {
+ } else if (const auto *GV = dyn_cast<GlobalValue>(this)) {
AssemblyWriter W(OS, SlotTable, GV->getParent(), nullptr, IsForDebug);
- if (const GlobalVariable *V = dyn_cast<GlobalVariable>(GV))
+ if (const auto *V = dyn_cast<GlobalVariable>(GV))
W.printGlobal(V);
- else if (const Function *F = dyn_cast<Function>(GV))
+ else if (const auto *F = dyn_cast<Function>(GV))
W.printFunction(F);
- else if (const GlobalAlias *A = dyn_cast<GlobalAlias>(GV))
+ else if (const auto *A = dyn_cast<GlobalAlias>(GV))
W.printAlias(A);
- else if (const GlobalIFunc *I = dyn_cast<GlobalIFunc>(GV))
+ else if (const auto *I = dyn_cast<GlobalIFunc>(GV))
W.printIFunc(I);
else
llvm_unreachable("Unknown GlobalValue to print out!");
- } else if (const MetadataAsValue *V = dyn_cast<MetadataAsValue>(this)) {
+ } else if (const auto *V = dyn_cast<MetadataAsValue>(this)) {
V->getMetadata()->print(ROS, MST, getModuleFromVal(V));
- } else if (const Constant *C = dyn_cast<Constant>(this)) {
+ } else if (const auto *C = dyn_cast<Constant>(this)) {
TypePrinting TypePrinter;
TypePrinter.print(C->getType(), OS);
OS << ' ';
AsmWriterContext WriterCtx(&TypePrinter, MST.getMachine());
- WriteConstantInternal(OS, C, WriterCtx);
+ writeConstantInternal(OS, C, WriterCtx);
} else if (isa<InlineAsm>(this) || isa<Argument>(this)) {
this->printAsOperand(OS, /* PrintType */ true, MST);
} else {
@@ -5256,7 +5216,7 @@ static bool printWithoutType(const Value &V, raw_ostream &O,
if (V.hasName() || isa<GlobalValue>(V) ||
(!isa<Constant>(V) && !isa<MetadataAsValue>(V))) {
AsmWriterContext WriterCtx(nullptr, Machine, M);
- WriteAsOperandInternal(O, &V, WriterCtx);
+ writeAsOperandInternal(O, &V, WriterCtx);
return true;
}
return false;
@@ -5265,13 +5225,8 @@ static bool printWithoutType(const Value &V, raw_ostream &O,
static void printAsOperandImpl(const Value &V, raw_ostream &O, bool PrintType,
ModuleSlotTracker &MST) {
TypePrinting TypePrinter(MST.getModule());
- if (PrintType) {
- TypePrinter.print(V.getType(), O);
- O << ' ';
- }
-
AsmWriterContext WriterCtx(&TypePrinter, MST.getMachine(), MST.getModule());
- WriteAsOperandInternal(O, &V, WriterCtx);
+ writeAsOperandInternal(O, &V, WriterCtx, PrintType);
}
void Value::printAsOperand(raw_ostream &O, bool PrintType,
@@ -5302,14 +5257,14 @@ void Value::printAsOperand(raw_ostream &O, bool PrintType,
static void printMetadataImplRec(raw_ostream &ROS, const Metadata &MD,
AsmWriterContext &WriterCtx) {
formatted_raw_ostream OS(ROS);
- WriteAsOperandInternal(OS, &MD, WriterCtx, /* FromValue */ true);
+ writeAsOperandInternal(OS, &MD, WriterCtx, /* FromValue */ true);
auto *N = dyn_cast<MDNode>(&MD);
if (!N || isa<DIExpression>(MD))
return;
OS << " = ";
- WriteMDNodeBodyInternal(OS, N, WriterCtx);
+ writeMDNodeBodyInternal(OS, N, WriterCtx);
}
namespace {
@@ -5370,14 +5325,14 @@ static void printMetadataImpl(raw_ostream &ROS, const Metadata &MD,
WriterCtx =
std::make_unique<AsmWriterContext>(&TypePrinter, MST.getMachine(), M);
- WriteAsOperandInternal(OS, &MD, *WriterCtx, /* FromValue */ true);
+ writeAsOperandInternal(OS, &MD, *WriterCtx, /* FromValue */ true);
auto *N = dyn_cast<MDNode>(&MD);
if (OnlyAsOperand || !N || isa<DIExpression>(MD))
return;
OS << " = ";
- WriteMDNodeBodyInternal(OS, N, *WriterCtx);
+ writeMDNodeBodyInternal(OS, N, *WriterCtx);
}
void Metadata::printAsOperand(raw_ostream &OS, const Module *M) const {
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index a8bb34f..33ca46c 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -30,6 +30,8 @@
#include "llvm/Support/Compiler.h"
using namespace llvm;
+namespace llvm {
+
// FIXME: Flag used for an ablation performance test, Issue #147390. Placing it
// here because referencing IR should be feasible from anywhere. Will be
// removed after the ablation test.
@@ -38,6 +40,8 @@ cl::opt<bool> ProfcheckDisableMetadataFixes(
cl::desc(
"Disable metadata propagation fixes discovered through Issue #147390"));
+} // end namespace llvm
+
InsertPosition::InsertPosition(Instruction *InsertBefore)
: InsertAt(InsertBefore ? InsertBefore->getIterator()
: InstListType::iterator()) {}
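This hunk, and the matching ones in LTO.cpp, PassBuilderPipelines.cpp and MemProfCommon.cpp further down, apply one pattern: a cl::opt that other translation units name through an extern declaration inside namespace llvm must also be defined inside namespace llvm, otherwise the declaration and the definition are different entities and the link fails. A reduced sketch with hypothetical names:

    // FlagDef.cpp
    #include "llvm/Support/CommandLine.h"
    namespace llvm {
    cl::opt<bool> MyTunable("my-tunable", cl::init(false));
    } // end namespace llvm

    // FlagUse.cpp
    namespace llvm {
    extern cl::opt<bool> MyTunable; // now names the same entity
    } // end namespace llvm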
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index dd83168..941e41f 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -4141,23 +4141,6 @@ void SwitchInst::growOperands() {
growHungoffUses(ReservedSpace);
}
-MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
- assert(Changed && "called only if metadata has changed");
-
- if (!Weights)
- return nullptr;
-
- assert(SI.getNumSuccessors() == Weights->size() &&
- "num of prof branch_weights must accord with num of successors");
-
- bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
-
- if (AllZeroes || Weights->size() < 2)
- return nullptr;
-
- return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
-}
-
void SwitchInstProfUpdateWrapper::init() {
MDNode *ProfileData = getBranchWeightMDNode(SI);
if (!ProfileData)
diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp
index 9cfb0ff..1add0c7 100644
--- a/llvm/lib/IR/Metadata.cpp
+++ b/llvm/lib/IR/Metadata.cpp
@@ -48,6 +48,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ModRef.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -1435,6 +1436,40 @@ MDNode *MDNode::getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B) {
return B;
}
+CaptureComponents MDNode::toCaptureComponents(const MDNode *MD) {
+ if (!MD)
+ return CaptureComponents::All;
+
+ CaptureComponents CC = CaptureComponents::None;
+ for (Metadata *Op : MD->operands()) {
+ CaptureComponents Component =
+ StringSwitch<CaptureComponents>(cast<MDString>(Op)->getString())
+ .Case("address", CaptureComponents::Address)
+ .Case("address_is_null", CaptureComponents::AddressIsNull)
+ .Case("provenance", CaptureComponents::Provenance)
+ .Case("read_provenance", CaptureComponents::ReadProvenance);
+ CC |= Component;
+ }
+ return CC;
+}
+
+MDNode *MDNode::fromCaptureComponents(LLVMContext &Ctx, CaptureComponents CC) {
+ assert(!capturesNothing(CC) && "Can't encode captures(none)");
+ if (capturesAll(CC))
+ return nullptr;
+
+ SmallVector<Metadata *> Components;
+ if (capturesAddressIsNullOnly(CC))
+ Components.push_back(MDString::get(Ctx, "address_is_null"));
+ else if (capturesAddress(CC))
+ Components.push_back(MDString::get(Ctx, "address"));
+ if (capturesReadProvenanceOnly(CC))
+ Components.push_back(MDString::get(Ctx, "read_provenance"));
+ else if (capturesFullProvenance(CC))
+ Components.push_back(MDString::get(Ctx, "provenance"));
+ return MDNode::get(Ctx, Components);
+}
+
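A hypothetical round-trip through the two helpers above (hand-written, assumes a live LLVMContext; the bitmask helpers come from llvm/Support/ModRef.h):

    #include <cassert>
    #include "llvm/IR/Metadata.h"
    #include "llvm/Support/ModRef.h"

    void roundTrip(llvm::LLVMContext &Ctx) {
      using llvm::CaptureComponents;
      CaptureComponents In =
          CaptureComponents::Address | CaptureComponents::ReadProvenance;
      llvm::MDNode *MD = llvm::MDNode::fromCaptureComponents(Ctx, In);
      // MD is !{!"address", !"read_provenance"}
      assert(llvm::MDNode::toCaptureComponents(MD) == In);
      // A null node means no restriction, i.e. captures(all):
      assert(llvm::capturesAll(llvm::MDNode::toCaptureComponents(nullptr)));
    }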
//===----------------------------------------------------------------------===//
// NamedMDNode implementation.
//
diff --git a/llvm/lib/IR/ProfDataUtils.cpp b/llvm/lib/IR/ProfDataUtils.cpp
index 99029c1..edeca97 100644
--- a/llvm/lib/IR/ProfDataUtils.cpp
+++ b/llvm/lib/IR/ProfDataUtils.cpp
@@ -12,6 +12,7 @@
#include "llvm/IR/ProfDataUtils.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
@@ -19,6 +20,7 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
@@ -84,10 +86,31 @@ static void extractFromBranchWeightMD(const MDNode *ProfileData,
}
}
+/// Shift the weights right (if needed) so they fit in uint32_t.
+static SmallVector<uint32_t> fitWeights(ArrayRef<uint64_t> Weights) {
+ SmallVector<uint32_t> Ret;
+ Ret.reserve(Weights.size());
+ uint64_t Max = *llvm::max_element(Weights);
+ if (Max > UINT_MAX) {
+ unsigned Offset = 32 - llvm::countl_zero(Max);
+ for (const uint64_t &Value : Weights)
+ Ret.push_back(static_cast<uint32_t>(Value >> Offset));
+ } else {
+ append_range(Ret, Weights);
+ }
+ return Ret;
+}
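To see why the shift keeps the largest weight in range (worked by hand, not from the patch): for Max of bit-width B, countl_zero(Max) is 64 - B, so Offset is B - 32 and Max >> Offset has exactly 32 significant bits; relative ratios between weights survive up to rounding.

    #include <cstdint>
    // Max = 1 << 40: B = 41, countl_zero = 23, Offset = 32 - 23 = 9.
    static_assert((uint64_t(1) << 40 >> 9) == (uint64_t(1) << 31),
                  "shifted maximum fits in uint32_t");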
+
} // namespace
namespace llvm {
-
+cl::opt<bool> ElideAllZeroBranchWeights("elide-all-zero-branch-weights",
+#if defined(LLVM_ENABLE_PROFCHECK)
+ cl::init(false)
+#else
+ cl::init(true)
+#endif
+);
const char *MDProfLabels::BranchWeights = "branch_weights";
const char *MDProfLabels::ExpectedBranchWeights = "expected";
const char *MDProfLabels::ValueProfile = "VP";
@@ -282,12 +305,23 @@ bool hasExplicitlyUnknownBranchWeights(const Instruction &I) {
}
void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights,
- bool IsExpected) {
+ bool IsExpected, bool ElideAllZero) {
+ if ((ElideAllZeroBranchWeights && ElideAllZero) &&
+ llvm::all_of(Weights, [](uint32_t V) { return V == 0; })) {
+ I.setMetadata(LLVMContext::MD_prof, nullptr);
+ return;
+ }
+
MDBuilder MDB(I.getContext());
MDNode *BranchWeights = MDB.createBranchWeights(Weights, IsExpected);
I.setMetadata(LLVMContext::MD_prof, BranchWeights);
}
+void setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights,
+ bool IsExpected, bool ElideAllZero) {
+ setBranchWeights(I, fitWeights(Weights), IsExpected, ElideAllZero);
+}
+
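A sketch of how a client pass might use the new overload (hypothetical counts; Br is assumed to be a conditional branch):

    #include "llvm/IR/ProfDataUtils.h"

    void attachCounts(llvm::Instruction &Br) {
      uint64_t Counts[] = {uint64_t(1) << 40, 5}; // raw 64-bit counts
      // Downscaled to uint32_t; with ElideAllZero (and outside
      // LLVM_ENABLE_PROFCHECK builds) an all-zero list now drops the
      // !prof attachment instead of emitting zero branch_weights.
      llvm::setFittedBranchWeights(Br, Counts, /*IsExpected=*/false,
                                   /*ElideAllZero=*/true);
    }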
SmallVector<uint32_t> downscaleWeights(ArrayRef<uint64_t> Weights,
std::optional<uint64_t> KnownMaxCount) {
uint64_t MaxCount = KnownMaxCount.has_value() ? KnownMaxCount.value()
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index e5e062d..a347609 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -36,7 +36,7 @@
using namespace llvm;
-cl::opt<bool> UseDerefAtPointSemantics(
+static cl::opt<bool> UseDerefAtPointSemantics(
"use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false),
cl::desc("Deref attributes and metadata infer facts at definition only"));
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 8c03d6f..6b3cd27 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -542,6 +542,7 @@ private:
void visitAliasScopeMetadata(const MDNode *MD);
void visitAliasScopeListMetadata(const MDNode *MD);
void visitAccessGroupMetadata(const MDNode *MD);
+ void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
template <class Ty> bool isValidMetadataArray(const MDTuple &N);
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
@@ -5373,6 +5374,27 @@ void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
}
}
+void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
+ static const char *ValidArgs[] = {"address_is_null", "address",
+ "read_provenance", "provenance"};
+
+ auto *SI = dyn_cast<StoreInst>(&I);
+ Check(SI, "!captures metadata can only be applied to store instructions", &I);
+ Check(SI->getValueOperand()->getType()->isPointerTy(),
+ "!captures metadata can only be applied to store with value operand of "
+ "pointer type",
+ &I);
+ Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
+ &I);
+
+ for (Metadata *Op : Captures->operands()) {
+ auto *Str = dyn_cast<MDString>(Op);
+ Check(Str, "!captures metadata must be a list of strings", &I);
+ Check(is_contained(ValidArgs, Str->getString()),
+ "invalid entry in !captures metadata", &I, Str);
+ }
+}
+
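For illustration, a hand-written sketch of producing metadata the new check accepts (every name here is invented; MD_captures itself is from this patch):

    #include "llvm/IR/IRBuilder.h"

    void attachCaptures(llvm::IRBuilder<> &B, llvm::Value *PtrVal,
                        llvm::Value *Slot) {
      llvm::LLVMContext &Ctx = B.getContext();
      llvm::StoreInst *SI = B.CreateStore(PtrVal, Slot);
      SI->setMetadata(
          llvm::LLVMContext::MD_captures,
          llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, "address")}));
      // The verifier now rejects !captures on a non-store, on a store of a
      // non-pointer value, with an empty list, or with a typo like "addres".
    }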
/// verifyInstruction - Verify that an instruction is well formed.
///
void Verifier::visitInstruction(Instruction &I) {
@@ -5600,6 +5622,9 @@ void Verifier::visitInstruction(Instruction &I) {
if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
visitAnnotationMetadata(Annotation);
+ if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
+ visitCapturesMetadata(I, Captures);
+
if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
visitMDNode(*N, AreDebugLocsAllowed::Yes);
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 7b25262..e6544f3 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -75,9 +75,10 @@ static cl::opt<bool>
DumpThinCGSCCs("dump-thin-cg-sccs", cl::init(false), cl::Hidden,
cl::desc("Dump the SCCs in the ThinLTO index's callgraph"));
+namespace llvm {
extern cl::opt<bool> CodeGenDataThinLTOTwoRounds;
-
extern cl::opt<bool> ForceImportAll;
+} // end namespace llvm
namespace llvm {
/// Enable global value internalization in LTO.
diff --git a/llvm/lib/Object/OffloadBundle.cpp b/llvm/lib/Object/OffloadBundle.cpp
index a6a9628a..329dcbf 100644
--- a/llvm/lib/Object/OffloadBundle.cpp
+++ b/llvm/lib/Object/OffloadBundle.cpp
@@ -128,7 +128,7 @@ OffloadBundleFatBin::create(MemoryBufferRef Buf, uint64_t SectionOffset,
if (Err)
return Err;
- return TheBundle;
+ return std::move(TheBundle);
}
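The added std::move is the usual fix for returning a move-only local through a converting return type such as Expected<T>: depending on the host compiler's implicit-move rules, copy-initialisation may otherwise be selected and fail to compile. A reduced, hypothetical sketch:

    #include <memory>
    #include "llvm/Support/Error.h"

    llvm::Expected<std::unique_ptr<int>> makeValue() {
      auto P = std::make_unique<int>(42);
      return std::move(P); // unique_ptr -> Expected<unique_ptr>
    }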
Error OffloadBundleFatBin::extractBundle(const ObjectFile &Source) {
diff --git a/llvm/lib/ObjectYAML/DXContainerYAML.cpp b/llvm/lib/ObjectYAML/DXContainerYAML.cpp
index 3c09ae4..5dff9ba 100644
--- a/llvm/lib/ObjectYAML/DXContainerYAML.cpp
+++ b/llvm/lib/ObjectYAML/DXContainerYAML.cpp
@@ -154,7 +154,7 @@ DXContainerYAML::RootSignatureYamlDesc::create(
if (Error E = readDescriptorRanges<dxbc::RTS0::v1::DescriptorRange>(
Header, RootSigDesc, DTV))
return std::move(E);
- } else if (Version == 2) {
+ } else if (Version == 2 || Version == 3) {
if (Error E = readDescriptorRanges<dxbc::RTS0::v2::DescriptorRange>(
Header, RootSigDesc, DTV))
return std::move(E);
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 256cf9d..7069e8d 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -150,6 +150,8 @@
using namespace llvm;
+namespace llvm {
+
static cl::opt<InliningAdvisorMode> UseInlineAdvisor(
"enable-ml-inliner", cl::init(InliningAdvisorMode::Default), cl::Hidden,
cl::desc("Enable ML policy for inliner. Currently trained for -Oz only"),
@@ -305,7 +307,6 @@ static cl::opt<std::string> InstrumentColdFuncOnlyPath(
extern cl::opt<std::string> UseCtxProfile;
extern cl::opt<bool> PGOInstrumentColdFunctionOnly;
-namespace llvm {
extern cl::opt<bool> EnableMemProfContextDisambiguation;
} // namespace llvm
@@ -610,7 +611,9 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
// Jump table to switch conversion.
if (EnableJumpTableToSwitch)
- FPM.addPass(JumpTableToSwitchPass());
+ FPM.addPass(JumpTableToSwitchPass(
+ /*InLTO=*/Phase == ThinOrFullLTOPhase::ThinLTOPostLink ||
+ Phase == ThinOrFullLTOPhase::FullLTOPostLink));
FPM.addPass(
SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
diff --git a/llvm/lib/ProfileData/MemProfCommon.cpp b/llvm/lib/ProfileData/MemProfCommon.cpp
index a13a291..cfd2efd 100644
--- a/llvm/lib/ProfileData/MemProfCommon.cpp
+++ b/llvm/lib/ProfileData/MemProfCommon.cpp
@@ -20,6 +20,8 @@
using namespace llvm;
using namespace llvm::memprof;
+namespace llvm {
+
// Upper bound on lifetime access density (accesses per byte per lifetime sec)
// for marking an allocation cold.
LLVM_ABI cl::opt<float> MemProfLifetimeAccessDensityColdThreshold(
@@ -48,6 +50,8 @@ LLVM_ABI cl::opt<bool>
cl::desc("Enable use of hot hints (only supported for "
"unambigously hot allocations)"));
+} // end namespace llvm
+
AllocationType llvm::memprof::getAllocType(uint64_t TotalLifetimeAccessDensity,
uint64_t AllocCount,
uint64_t TotalLifetime) {
diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp
index d14abb4..8623c06 100644
--- a/llvm/lib/Support/APFloat.cpp
+++ b/llvm/lib/Support/APFloat.cpp
@@ -5857,7 +5857,7 @@ DoubleAPFloat frexp(const DoubleAPFloat &Arg, int &Exp,
// practice.
if (Exp == APFloat::IEK_NaN) {
DoubleAPFloat Quiet{Arg};
- Quiet.getFirst().makeQuiet();
+ Quiet.getFirst() = Quiet.getFirst().makeQuiet();
return Quiet;
}
diff --git a/llvm/lib/Support/Mustache.cpp b/llvm/lib/Support/Mustache.cpp
index 6275e5e..47860c0 100644
--- a/llvm/lib/Support/Mustache.cpp
+++ b/llvm/lib/Support/Mustache.cpp
@@ -329,6 +329,36 @@ struct Tag {
size_t StartPosition = StringRef::npos;
};
+[[maybe_unused]] static const char *tagKindToString(Tag::Kind K) {
+ switch (K) {
+ case Tag::Kind::None:
+ return "None";
+ case Tag::Kind::Normal:
+ return "Normal";
+ case Tag::Kind::Triple:
+ return "Triple";
+ }
+ llvm_unreachable("Unknown Tag::Kind");
+}
+
+[[maybe_unused]] static const char *jsonKindToString(json::Value::Kind K) {
+ switch (K) {
+ case json::Value::Kind::Null:
+ return "JSON_KIND_NULL";
+ case json::Value::Kind::Boolean:
+ return "JSON_KIND_BOOLEAN";
+ case json::Value::Kind::Number:
+ return "JSON_KIND_NUMBER";
+ case json::Value::Kind::String:
+ return "JSON_KIND_STRING";
+ case json::Value::Kind::Array:
+ return "JSON_KIND_ARRAY";
+ case json::Value::Kind::Object:
+ return "JSON_KIND_OBJECT";
+ }
+ llvm_unreachable("Unknown json::Value::Kind");
+}
+
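The [[maybe_unused]] on both helpers matters because their only callers are LLVM_DEBUG sites, which compile to nothing in NDEBUG builds; without the attribute, release builds would warn about unused static functions. A hand-written sketch of the pattern:

    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"
    #define DEBUG_TYPE "demo"

    [[maybe_unused]] static const char *stateName(bool On) {
      return On ? "on" : "off";
    }

    static void toggle(bool On) {
      LLVM_DEBUG(llvm::dbgs() << "state: " << stateName(On) << "\n");
    }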
static Tag findNextTag(StringRef Template, size_t StartPos, StringRef Open,
StringRef Close) {
const StringLiteral TripleOpen("{{{");
@@ -373,11 +403,10 @@ static Tag findNextTag(StringRef Template, size_t StartPos, StringRef Open,
static std::optional<std::pair<StringRef, StringRef>>
processTag(const Tag &T, SmallVectorImpl<Token> &Tokens) {
- LLVM_DEBUG(dbgs() << " Found tag: \"" << T.FullMatch << "\", Content: \""
- << T.Content << "\"\n");
+ LLVM_DEBUG(dbgs() << "[Tag] " << T.FullMatch << ", Content: " << T.Content
+ << ", Kind: " << tagKindToString(T.TagKind) << "\n");
if (T.TagKind == Tag::Kind::Triple) {
Tokens.emplace_back(T.FullMatch.str(), "&" + T.Content.str(), '&');
- LLVM_DEBUG(dbgs() << " Created UnescapeVariable token.\n");
return std::nullopt;
}
StringRef Interpolated = T.Content;
@@ -385,7 +414,6 @@ processTag(const Tag &T, SmallVectorImpl<Token> &Tokens) {
if (!Interpolated.trim().starts_with("=")) {
char Front = Interpolated.empty() ? ' ' : Interpolated.trim().front();
Tokens.emplace_back(RawBody, Interpolated.str(), Front);
- LLVM_DEBUG(dbgs() << " Created tag token of type '" << Front << "'\n");
return std::nullopt;
}
Tokens.emplace_back(RawBody, Interpolated.str(), '=');
@@ -395,8 +423,8 @@ processTag(const Tag &T, SmallVectorImpl<Token> &Tokens) {
DelimSpec = DelimSpec.trim();
std::pair<StringRef, StringRef> Ret = DelimSpec.split(' ');
- LLVM_DEBUG(dbgs() << " Found Set Delimiter tag. NewOpen='" << Ret.first
- << "', NewClose='" << Ret.second << "'\n");
+ LLVM_DEBUG(dbgs() << "[Set Delimiter] NewOpen: " << Ret.first
+ << ", NewClose: " << Ret.second << "\n");
return Ret;
}
@@ -405,15 +433,15 @@ processTag(const Tag &T, SmallVectorImpl<Token> &Tokens) {
// but we don't support that here. An unescape variable
// is represented only by {{& variable}}.
static SmallVector<Token> tokenize(StringRef Template) {
- LLVM_DEBUG(dbgs() << "Tokenizing template: \"" << Template << "\"\n");
+ LLVM_DEBUG(dbgs() << "[Tokenize Template] \"" << Template << "\"\n");
SmallVector<Token> Tokens;
SmallString<8> Open("{{");
SmallString<8> Close("}}");
size_t Start = 0;
while (Start < Template.size()) {
- LLVM_DEBUG(dbgs() << "Loop start. Start=" << Start << ", Open='" << Open
- << "', Close='" << Close << "'\n");
+ LLVM_DEBUG(dbgs() << "[Tokenize Loop] Start:" << Start << ", Open:'" << Open
+ << "', Close:'" << Close << "'\n");
Tag T = findNextTag(Template, Start, Open, Close);
if (T.TagKind == Tag::Kind::None) {
@@ -428,7 +456,6 @@ static SmallVector<Token> tokenize(StringRef Template) {
if (T.StartPosition > Start) {
StringRef Text = Template.substr(Start, T.StartPosition - Start);
Tokens.emplace_back(Text.str());
- LLVM_DEBUG(dbgs() << " Created Text token: \"" << Text << "\"\n");
}
if (auto NewDelims = processTag(T, Tokens)) {
@@ -479,7 +506,6 @@ static SmallVector<Token> tokenize(StringRef Template) {
if ((!HasTextBehind && !HasTextAhead) || (!HasTextBehind && Idx == LastIdx))
stripTokenBefore(Tokens, Idx, CurrentToken, CurrentType);
}
- LLVM_DEBUG(dbgs() << "Tokenizing finished.\n");
return Tokens;
}
@@ -545,8 +571,8 @@ protected:
Indent.resize(Indentation, ' ');
for (char C : Data) {
- LLVM_DEBUG(dbgs() << "IndentationStream: NeedsIndent=" << NeedsIndent
- << ", C='" << C << "', Indentation=" << Indentation
+ LLVM_DEBUG(dbgs() << "[Indentation Stream] NeedsIndent:" << NeedsIndent
+ << ", C:'" << C << "', Indentation:" << Indentation
<< "\n");
if (NeedsIndent && C != '\n') {
WrappedStream << Indent;
@@ -654,7 +680,9 @@ void Parser::parseMustache(ASTNode *Parent) {
}
}
static void toMustacheString(const json::Value &Data, raw_ostream &OS) {
- LLVM_DEBUG(dbgs() << "toMustacheString: kind=" << (int)Data.kind() << "\n");
+ LLVM_DEBUG(dbgs() << "[To Mustache String] Kind: "
+ << jsonKindToString(Data.kind()) << ", Data: " << Data
+ << "\n");
switch (Data.kind()) {
case json::Value::Null:
return;
@@ -667,7 +695,6 @@ static void toMustacheString(const json::Value &Data, raw_ostream &OS) {
}
case json::Value::String: {
auto Str = *Data.getAsString();
- LLVM_DEBUG(dbgs() << " --> writing string: \"" << Str << "\"\n");
OS << Str.str();
return;
}
@@ -696,8 +723,8 @@ void ASTNode::renderText(MustacheOutputStream &OS) { OS << Body; }
void ASTNode::renderPartial(const json::Value &CurrentCtx,
MustacheOutputStream &OS) {
- LLVM_DEBUG(dbgs() << "renderPartial: Accessor=" << AccessorValue[0]
- << ", Indentation=" << Indentation << "\n");
+ LLVM_DEBUG(dbgs() << "[Render Partial] Accessor:" << AccessorValue[0]
+ << ", Indentation:" << Indentation << "\n");
auto Partial = Ctx.Partials.find(AccessorValue[0]);
if (Partial != Ctx.Partials.end())
renderPartial(CurrentCtx, OS, Partial->getValue().get());
@@ -716,13 +743,12 @@ void ASTNode::renderVariable(const json::Value &CurrentCtx,
void ASTNode::renderUnescapeVariable(const json::Value &CurrentCtx,
MustacheOutputStream &OS) {
- LLVM_DEBUG(dbgs() << "renderUnescapeVariable: Accessor=" << AccessorValue[0]
+ LLVM_DEBUG(dbgs() << "[Render UnescapeVariable] Accessor:" << AccessorValue[0]
<< "\n");
auto Lambda = Ctx.Lambdas.find(AccessorValue[0]);
if (Lambda != Ctx.Lambdas.end()) {
renderLambdas(CurrentCtx, OS, Lambda->getValue());
} else if (const json::Value *ContextPtr = findContext()) {
- LLVM_DEBUG(dbgs() << " --> Found context value, writing to stream.\n");
OS.suspendIndentation();
toMustacheString(*ContextPtr, OS);
OS.resumeIndentation();
@@ -792,8 +818,6 @@ void ASTNode::render(const llvm::json::Value &Data, MustacheOutputStream &OS) {
}
const json::Value *ASTNode::findContext() {
- LLVM_DEBUG(dbgs() << "findContext: AccessorValue[0]=" << AccessorValue[0]
- << "\n");
// The mustache spec allows for dot notation to access nested values
// a single dot refers to the current context.
// We attempt to find the JSON context in the current node, if it is not
@@ -808,22 +832,12 @@ const json::Value *ASTNode::findContext() {
StringRef CurrentAccessor = AccessorValue[0];
ASTNode *CurrentParent = Parent;
- LLVM_DEBUG(dbgs() << "findContext: ParentContext: ";
- if (ParentContext) ParentContext->print(dbgs());
- else dbgs() << "nullptr"; dbgs() << "\n");
-
while (!CurrentContext || !CurrentContext->get(CurrentAccessor)) {
- LLVM_DEBUG(dbgs() << "findContext: climbing parent\n");
if (CurrentParent->Ty != Root) {
CurrentContext = CurrentParent->ParentContext->getAsObject();
CurrentParent = CurrentParent->Parent;
- LLVM_DEBUG(dbgs() << "findContext: new ParentContext: ";
- if (CurrentParent->ParentContext)
- CurrentParent->ParentContext->print(dbgs());
- else dbgs() << "nullptr"; dbgs() << "\n");
continue;
}
- LLVM_DEBUG(dbgs() << "findContext: reached root, not found\n");
return nullptr;
}
const json::Value *Context = nullptr;
@@ -839,9 +853,6 @@ const json::Value *ASTNode::findContext() {
Context = CurrentValue;
}
}
- LLVM_DEBUG(dbgs() << "findContext: found value: ";
- if (Context) Context->print(dbgs()); else dbgs() << "nullptr";
- dbgs() << "\n");
return Context;
}
@@ -853,8 +864,8 @@ void ASTNode::renderChild(const json::Value &Contexts,
void ASTNode::renderPartial(const json::Value &Contexts,
MustacheOutputStream &OS, ASTNode *Partial) {
- LLVM_DEBUG(dbgs() << "renderPartial (helper): Indentation=" << Indentation
- << "\n");
+  LLVM_DEBUG(dbgs() << "[Render Partial Indentation] Indentation: "
+                    << Indentation << "\n");
AddIndentationStringStream IS(OS, Indentation);
Partial->render(Contexts, IS);
}
diff --git a/llvm/lib/Support/Path.cpp b/llvm/lib/Support/Path.cpp
index 761d29e..3e06666 100644
--- a/llvm/lib/Support/Path.cpp
+++ b/llvm/lib/Support/Path.cpp
@@ -700,6 +700,55 @@ bool is_relative(const Twine &path, Style style) {
return !is_absolute(path, style);
}
+void make_absolute(const Twine &current_directory,
+ SmallVectorImpl<char> &path) {
+ StringRef p(path.data(), path.size());
+
+ bool rootDirectory = has_root_directory(p);
+ bool rootName = has_root_name(p);
+
+ // Already absolute.
+ if ((rootName || is_style_posix(Style::native)) && rootDirectory)
+ return;
+
+ // All the following conditions will need the current directory.
+ SmallString<128> current_dir;
+ current_directory.toVector(current_dir);
+
+ // Relative path. Prepend the current directory.
+ if (!rootName && !rootDirectory) {
+ // Append path to the current directory.
+ append(current_dir, p);
+ // Set path to the result.
+ path.swap(current_dir);
+ return;
+ }
+
+ if (!rootName && rootDirectory) {
+ StringRef cdrn = root_name(current_dir);
+ SmallString<128> curDirRootName(cdrn.begin(), cdrn.end());
+ append(curDirRootName, p);
+ // Set path to the result.
+ path.swap(curDirRootName);
+ return;
+ }
+
+ if (rootName && !rootDirectory) {
+ StringRef pRootName = root_name(p);
+ StringRef bRootDirectory = root_directory(current_dir);
+ StringRef bRelativePath = relative_path(current_dir);
+ StringRef pRelativePath = relative_path(p);
+
+ SmallString<128> res;
+ append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath);
+ path.swap(res);
+ return;
+ }
+
+ llvm_unreachable("All rootName and rootDirectory combinations should have "
+ "occurred above!");
+}
+
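A usage sketch for the relocated overload (hypothetical paths; result shown for a POSIX-style host):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"

    void demo() {
      llvm::SmallString<128> P("foo/bar.txt");
      llvm::sys::path::make_absolute("/home/user", P);
      // P == "/home/user/foo/bar.txt"; the process CWD is never consulted.
    }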
StringRef remove_leading_dotslash(StringRef Path, Style style) {
// Remove leading "./" (or ".//" or "././" etc.)
while (Path.size() > 2 && Path[0] == '.' && is_separator(Path[1], style)) {
@@ -903,55 +952,6 @@ getPotentiallyUniqueTempFileName(const Twine &Prefix, StringRef Suffix,
return createTemporaryFile(Prefix, Suffix, Dummy, ResultPath, FS_Name);
}
-void make_absolute(const Twine &current_directory,
- SmallVectorImpl<char> &path) {
- StringRef p(path.data(), path.size());
-
- bool rootDirectory = path::has_root_directory(p);
- bool rootName = path::has_root_name(p);
-
- // Already absolute.
- if ((rootName || is_style_posix(Style::native)) && rootDirectory)
- return;
-
- // All of the following conditions will need the current directory.
- SmallString<128> current_dir;
- current_directory.toVector(current_dir);
-
- // Relative path. Prepend the current directory.
- if (!rootName && !rootDirectory) {
- // Append path to the current directory.
- path::append(current_dir, p);
- // Set path to the result.
- path.swap(current_dir);
- return;
- }
-
- if (!rootName && rootDirectory) {
- StringRef cdrn = path::root_name(current_dir);
- SmallString<128> curDirRootName(cdrn.begin(), cdrn.end());
- path::append(curDirRootName, p);
- // Set path to the result.
- path.swap(curDirRootName);
- return;
- }
-
- if (rootName && !rootDirectory) {
- StringRef pRootName = path::root_name(p);
- StringRef bRootDirectory = path::root_directory(current_dir);
- StringRef bRelativePath = path::relative_path(current_dir);
- StringRef pRelativePath = path::relative_path(p);
-
- SmallString<128> res;
- path::append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath);
- path.swap(res);
- return;
- }
-
- llvm_unreachable("All rootName and rootDirectory combinations should have "
- "occurred above!");
-}
-
std::error_code make_absolute(SmallVectorImpl<char> &path) {
if (path::is_absolute(path))
return {};
@@ -960,7 +960,7 @@ std::error_code make_absolute(SmallVectorImpl<char> &path) {
if (std::error_code ec = current_path(current_dir))
return ec;
- make_absolute(current_dir, path);
+ path::make_absolute(current_dir, path);
return {};
}
diff --git a/llvm/lib/Support/ScopedPrinter.cpp b/llvm/lib/Support/ScopedPrinter.cpp
index a17e397..efb6178 100644
--- a/llvm/lib/Support/ScopedPrinter.cpp
+++ b/llvm/lib/Support/ScopedPrinter.cpp
@@ -1,12 +1,17 @@
-#include "llvm/Support/ScopedPrinter.h"
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/Format.h"
-using namespace llvm::support;
+using namespace llvm;
-namespace llvm {
-
-raw_ostream &operator<<(raw_ostream &OS, const HexNumber &Value) {
+raw_ostream &llvm::operator<<(raw_ostream &OS, const HexNumber &Value) {
OS << "0x" << utohexstr(Value.Value);
return OS;
}
@@ -45,5 +50,3 @@ JSONScopedPrinter::JSONScopedPrinter(
if (this->OuterScope)
this->OuterScope->setPrinter(*this);
}
-
-} // namespace llvm
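Defining the operator with a qualified name, instead of re-opening namespace llvm, has a practical benefit: a qualified definition only compiles if a matching declaration is already visible, so signature drift against the header becomes a hard error rather than a silent extra overload. A reduced sketch with invented names:

    #include <cstdint>
    #include "llvm/ADT/StringExtras.h"
    #include "llvm/Support/raw_ostream.h"

    namespace demo {
    struct Hex { uint64_t Value; };
    llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Hex &V);
    } // namespace demo

    // Fails to compile if the declaration above changes or disappears.
    llvm::raw_ostream &demo::operator<<(llvm::raw_ostream &OS, const Hex &V) {
      return OS << "0x" << llvm::utohexstr(V.Value);
    }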
diff --git a/llvm/lib/Support/StringMap.cpp b/llvm/lib/Support/StringMap.cpp
index 3432dc1..4aee30c 100644
--- a/llvm/lib/Support/StringMap.cpp
+++ b/llvm/lib/Support/StringMap.cpp
@@ -83,7 +83,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name,
// Hash table unallocated so far?
if (NumBuckets == 0)
init(16);
- if (shouldReverseIterate())
+ if constexpr (shouldReverseIterate())
FullHashValue = ~FullHashValue;
unsigned BucketNo = FullHashValue & (NumBuckets - 1);
unsigned *HashTable = getHashTable(TheTable, NumBuckets);
@@ -142,7 +142,7 @@ int StringMapImpl::FindKey(StringRef Key, uint32_t FullHashValue) const {
#ifdef EXPENSIVE_CHECKS
assert(FullHashValue == hash(Key));
#endif
- if (shouldReverseIterate())
+ if constexpr (shouldReverseIterate())
FullHashValue = ~FullHashValue;
unsigned BucketNo = FullHashValue & (NumBuckets - 1);
unsigned *HashTable = getHashTable(TheTable, NumBuckets);
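shouldReverseIterate() is a constexpr predicate keyed off LLVM_ENABLE_REVERSE_ITERATION, so if constexpr lets the compiler discard the untaken branch outright rather than leaving it to the optimizer. A hand-rolled sketch of the same idiom:

    constexpr bool reverseIteration() {
    #ifdef LLVM_ENABLE_REVERSE_ITERATION
      return true;
    #else
      return false;
    #endif
    }

    unsigned bucketFor(unsigned FullHashValue, unsigned NumBuckets) {
      if constexpr (reverseIteration())
        FullHashValue = ~FullHashValue; // discarded unless the macro is set
      return FullHashValue & (NumBuckets - 1);
    }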
diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp
index 44d2ee7..c754b30 100644
--- a/llvm/lib/Support/VirtualFileSystem.cpp
+++ b/llvm/lib/Support/VirtualFileSystem.cpp
@@ -133,7 +133,7 @@ std::error_code FileSystem::makeAbsolute(SmallVectorImpl<char> &Path) const {
if (!WorkingDir)
return WorkingDir.getError();
- llvm::sys::fs::make_absolute(WorkingDir.get(), Path);
+ sys::path::make_absolute(WorkingDir.get(), Path);
return {};
}
@@ -300,7 +300,7 @@ private:
if (!WD || !*WD)
return Path;
Path.toVector(Storage);
- sys::fs::make_absolute(WD->get().Resolved, Storage);
+ sys::path::make_absolute(WD->get().Resolved, Storage);
return Storage;
}
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 79655e1..0f4bbfc3 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -1610,7 +1610,8 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
int BaseOffset = -AFI->getTaggedBasePointerOffset();
Register FrameReg;
StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference(
- MF, BaseOffset, false /*isFixed*/, false /*isSVE*/, FrameReg,
+ MF, BaseOffset, false /*isFixed*/, TargetStackID::Default /*StackID*/,
+ FrameReg,
/*PreferFP=*/false,
/*ForSimm=*/true);
Register SrcReg = FrameReg;
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index ab5c6f3..8d6eb91 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -56,15 +56,20 @@
// | async context if needed |
// | (a.k.a. "frame record") |
// |-----------------------------------| <- fp(=x29)
-// | <hazard padding> |
-// |-----------------------------------|
-// | |
-// | callee-saved fp/simd/SVE regs |
-// | |
-// |-----------------------------------|
-// | |
-// | SVE stack objects |
-// | |
+// Default SVE stack layout Split SVE objects
+// (aarch64-split-sve-objects=false) (aarch64-split-sve-objects=true)
+// |-----------------------------------| |-----------------------------------|
+// | <hazard padding> | | callee-saved PPR registers |
+// |-----------------------------------| |-----------------------------------|
+// | | | PPR stack objects |
+// | callee-saved fp/simd/SVE regs | |-----------------------------------|
+// | | | <hazard padding> |
+// |-----------------------------------| |-----------------------------------|
+// | | | callee-saved ZPR/FPR registers |
+// | SVE stack objects | |-----------------------------------|
+// | | | ZPR stack objects |
+// |-----------------------------------| |-----------------------------------|
+// ^ NB: FPR CSRs are promoted to ZPRs
// |-----------------------------------|
// |.empty.space.to.make.part.below....|
// |.aligned.in.case.it.needs.more.than| (size of this area is unknown at
@@ -274,6 +279,11 @@ static cl::opt<bool> OrderFrameObjects("aarch64-order-frame-objects",
cl::desc("sort stack allocations"),
cl::init(true), cl::Hidden);
+static cl::opt<bool>
+ SplitSVEObjects("aarch64-split-sve-objects",
+ cl::desc("Split allocation of ZPR & PPR objects"),
+ cl::init(false), cl::Hidden);
+
cl::opt<bool> EnableHomogeneousPrologEpilog(
"homogeneous-prolog-epilog", cl::Hidden,
cl::desc("Emit homogeneous prologue and epilogue for the size "
@@ -324,7 +334,41 @@ AArch64FrameLowering::getArgumentStackToRestore(MachineFunction &MF,
static bool produceCompactUnwindFrame(const AArch64FrameLowering &,
MachineFunction &MF);
-// Conservatively, returns true if the function is likely to have an SVE vectors
+enum class AssignObjectOffsets { No, Yes };
+/// Process all the SVE stack objects and determine the SVE stack sizes and
+/// offsets for each object. If AssignOffsets is "Yes", the offsets get
+/// assigned (and the SVE stack sizes set). Returns the SVE stack sizes.
+static SVEStackSizes determineSVEStackSizes(MachineFunction &MF,
+ AssignObjectOffsets AssignOffsets);
+
+static unsigned getStackHazardSize(const MachineFunction &MF) {
+ return MF.getSubtarget<AArch64Subtarget>().getStreamingHazardSize();
+}
+
+/// Returns true if PPRs are spilled as ZPRs.
+static bool arePPRsSpilledAsZPR(const MachineFunction &MF) {
+ return MF.getSubtarget().getRegisterInfo()->getSpillSize(
+ AArch64::PPRRegClass) == 16;
+}
+
+StackOffset
+AArch64FrameLowering::getZPRStackSize(const MachineFunction &MF) const {
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ return StackOffset::getScalable(AFI->getStackSizeZPR());
+}
+
+StackOffset
+AArch64FrameLowering::getPPRStackSize(const MachineFunction &MF) const {
+  // With split SVE objects, the hazard padding is added to the PPR region,
+  // which places it between the [GPR, PPR] area and the [ZPR, FPR] area,
+  // avoiding hazards between the GPR/PPR and ZPR/FPR sides of the stack.
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ return StackOffset::get(AFI->hasSplitSVEObjects() ? getStackHazardSize(MF)
+ : 0,
+ AFI->getStackSizePPR());
+}
+
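A worked example of the accessor above, with hypothetical numbers: a 64-byte streaming hazard size and 32 scalable bytes of predicate saves and locals give

    // getPPRStackSize(MF) == StackOffset::get(/*Fixed=*/64, /*Scalable=*/32)
    // getZPRStackSize(MF) stays purely scalable. Without splitting, the PPR
    // bytes are folded into the ZPR size and the PPR size is zero.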
+// Conservatively, returns true if the function is likely to have SVE vectors
// on the stack. This function is safe to be called before callee-saves or
// object offsets have been determined.
static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
@@ -338,7 +382,7 @@ static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
const MachineFrameInfo &MFI = MF.getFrameInfo();
for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) {
- if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
+ if (MFI.hasScalableStackID(FI))
return true;
}
@@ -482,13 +526,6 @@ AArch64FrameLowering::getFixedObjectSize(const MachineFunction &MF,
}
}
-/// Returns the size of the entire SVE stackframe (calleesaves + spills).
-StackOffset
-AArch64FrameLowering::getSVEStackSize(const MachineFunction &MF) const {
- const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- return StackOffset::getScalable((int64_t)AFI->getStackSizeSVE());
-}
-
bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
if (!EnableRedZone)
return false;
@@ -514,7 +551,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
!Subtarget.hasSVE();
return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
- getSVEStackSize(MF) || LowerQRegCopyThroughMem);
+ AFI->hasSVEStackSize() || LowerQRegCopyThroughMem);
}
/// hasFPImpl - Return true if the specified function should have a dedicated
@@ -557,7 +594,7 @@ bool AArch64FrameLowering::hasFPImpl(const MachineFunction &MF) const {
// CFA in either of these cases.
if (AFI.needsDwarfUnwindInfo(MF) &&
((requiresSaveVG(MF) || AFI.getSMEFnAttrs().hasStreamingBody()) &&
- (!AFI.hasCalculatedStackSizeSVE() || AFI.getStackSizeSVE() > 0)))
+ (!AFI.hasCalculatedStackSizeSVE() || AFI.hasSVEStackSize())))
return true;
// With large callframes around we may need to use FP to access the scavenging
// emergency spillslot.
@@ -1126,10 +1163,6 @@ static bool isTargetWindows(const MachineFunction &MF) {
return MF.getSubtarget<AArch64Subtarget>().isTargetWindows();
}
-static unsigned getStackHazardSize(const MachineFunction &MF) {
- return MF.getSubtarget<AArch64Subtarget>().getStreamingHazardSize();
-}
-
void AArch64FrameLowering::emitPacRetPlusLeafHardening(
MachineFunction &MF) const {
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
@@ -1212,7 +1245,9 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
const auto &MFI = MF.getFrameInfo();
int64_t ObjectOffset = MFI.getObjectOffset(FI);
- StackOffset SVEStackSize = getSVEStackSize(MF);
+ StackOffset ZPRStackSize = getZPRStackSize(MF);
+ StackOffset PPRStackSize = getPPRStackSize(MF);
+ StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
// For VLA-area objects, just emit an offset at the end of the stack frame.
// Whilst not quite correct, these objects do live at the end of the frame and
@@ -1228,11 +1263,21 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
bool FPAfterSVECalleeSaves =
isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
- if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+ if (MFI.hasScalableStackID(FI)) {
if (FPAfterSVECalleeSaves &&
- -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize())
+ -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize()) {
+ assert(!AFI->hasSplitSVEObjects() &&
+ "split-sve-objects not supported with FPAfterSVECalleeSaves");
return StackOffset::getScalable(ObjectOffset);
- return StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()),
+ }
+ StackOffset AccessOffset{};
+    // With split SVE objects, the scalable vectors sit below (at lower
+    // addresses than) the predicates, so subtract the predicate area size.
+ if (AFI->hasSplitSVEObjects() &&
+ MFI.getStackID(FI) == TargetStackID::ScalableVector)
+ AccessOffset = -PPRStackSize;
+ return AccessOffset +
+ StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()),
ObjectOffset);
}
@@ -1294,14 +1339,15 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference(
const auto &MFI = MF.getFrameInfo();
int64_t ObjectOffset = MFI.getObjectOffset(FI);
bool isFixed = MFI.isFixedObjectIndex(FI);
- bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
- return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
- PreferFP, ForSimm);
+ auto StackID = static_cast<TargetStackID::Value>(MFI.getStackID(FI));
+ return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, StackID,
+ FrameReg, PreferFP, ForSimm);
}
StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
- const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE,
- Register &FrameReg, bool PreferFP, bool ForSimm) const {
+ const MachineFunction &MF, int64_t ObjectOffset, bool isFixed,
+ TargetStackID::Value StackID, Register &FrameReg, bool PreferFP,
+ bool ForSimm) const {
const auto &MFI = MF.getFrameInfo();
const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
MF.getSubtarget().getRegisterInfo());
@@ -1312,8 +1358,11 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed();
bool isCSR =
!isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI));
+ bool isSVE = MFI.isScalableStackID(StackID);
- const StackOffset &SVEStackSize = getSVEStackSize(MF);
+ StackOffset ZPRStackSize = getZPRStackSize(MF);
+ StackOffset PPRStackSize = getPPRStackSize(MF);
+ StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
// Use frame pointer to reference fixed objects. Use it for locals if
// there are VLAs or a dynamically realigned SP (and thus the SP isn't
@@ -1388,12 +1437,25 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
if (isSVE) {
- StackOffset FPOffset =
- StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset);
+ StackOffset FPOffset = StackOffset::get(
+ -AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset);
StackOffset SPOffset =
SVEStackSize +
StackOffset::get(MFI.getStackSize() - AFI->getCalleeSavedStackSize(),
ObjectOffset);
+
+ // With split SVE objects the ObjectOffset is relative to the split area
+ // (i.e. the PPR area or ZPR area respectively).
+ if (AFI->hasSplitSVEObjects() && StackID == TargetStackID::ScalableVector) {
+ // If we're accessing an SVE vector with split SVE objects...
+ // - From the FP we need to move down past the PPR area:
+ FPOffset -= PPRStackSize;
+ // - From the SP we only need to move up to the ZPR area:
+ SPOffset -= PPRStackSize;
+ // Note: `SPOffset = SVEStackSize + ...`, so `-= PPRStackSize` results in
+ // `SPOffset = ZPRStackSize + ...`.
+ }
+
if (FPAfterSVECalleeSaves) {
FPOffset += StackOffset::getScalable(AFI->getSVECalleeSavedStackSize());
if (-ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize()) {
@@ -1401,6 +1463,7 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
SPOffset += StackOffset::getFixed(AFI->getCalleeSavedStackSize());
}
}
+
// Always use the FP for SVE spills if available and beneficial.
if (hasFP(MF) && (SPOffset.getFixed() ||
FPOffset.getScalable() < SPOffset.getScalable() ||
@@ -1408,13 +1471,13 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
FrameReg = RegInfo->getFrameRegister(MF);
return FPOffset;
}
-
FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
: (unsigned)AArch64::SP;
+
return SPOffset;
}
- StackOffset ScalableOffset = {};
+ StackOffset SVEAreaOffset = {};
if (FPAfterSVECalleeSaves) {
// In this stack layout, the FP is in between the callee saves and other
// SVE allocations.
@@ -1422,25 +1485,25 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
StackOffset::getScalable(AFI->getSVECalleeSavedStackSize());
if (UseFP) {
if (isFixed)
- ScalableOffset = SVECalleeSavedStack;
+ SVEAreaOffset = SVECalleeSavedStack;
else if (!isCSR)
- ScalableOffset = SVECalleeSavedStack - SVEStackSize;
+ SVEAreaOffset = SVECalleeSavedStack - SVEStackSize;
} else {
if (isFixed)
- ScalableOffset = SVEStackSize;
+ SVEAreaOffset = SVEStackSize;
else if (isCSR)
- ScalableOffset = SVEStackSize - SVECalleeSavedStack;
+ SVEAreaOffset = SVEStackSize - SVECalleeSavedStack;
}
} else {
if (UseFP && !(isFixed || isCSR))
- ScalableOffset = -SVEStackSize;
+ SVEAreaOffset = -SVEStackSize;
if (!UseFP && (isFixed || isCSR))
- ScalableOffset = SVEStackSize;
+ SVEAreaOffset = SVEStackSize;
}
if (UseFP) {
FrameReg = RegInfo->getFrameRegister(MF);
- return StackOffset::getFixed(FPOffset) + ScalableOffset;
+ return StackOffset::getFixed(FPOffset) + SVEAreaOffset;
}
// Use the base pointer if we have one.
@@ -1457,7 +1520,7 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
Offset -= AFI->getLocalStackSize();
}
- return StackOffset::getFixed(Offset) + ScalableOffset;
+ return StackOffset::getFixed(Offset) + SVEAreaOffset;
}
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
@@ -1614,11 +1677,25 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL,
RegInc = -1;
FirstReg = Count - 1;
}
+
bool FPAfterSVECalleeSaves = IsWindows && AFI->getSVECalleeSavedStackSize();
- int ScalableByteOffset =
- FPAfterSVECalleeSaves ? 0 : AFI->getSVECalleeSavedStackSize();
+
+ int ZPRByteOffset = 0;
+ int PPRByteOffset = 0;
+ bool SplitPPRs = AFI->hasSplitSVEObjects();
+ if (SplitPPRs) {
+ ZPRByteOffset = AFI->getZPRCalleeSavedStackSize();
+ PPRByteOffset = AFI->getPPRCalleeSavedStackSize();
+ } else if (!FPAfterSVECalleeSaves) {
+ ZPRByteOffset =
+ AFI->getZPRCalleeSavedStackSize() + AFI->getPPRCalleeSavedStackSize();
+ // Unused: Everything goes in ZPR space.
+ PPRByteOffset = 0;
+ }
+
bool NeedGapToAlignStack = AFI->hasCalleeSaveStackFreeSpace();
Register LastReg = 0;
+ bool HasCSHazardPadding = AFI->hasStackHazardSlotIndex() && !SplitPPRs;
// When iterating backwards, the loop condition relies on unsigned wraparound.
for (unsigned i = FirstReg; i < Count; i += RegInc) {
@@ -1647,8 +1724,12 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL,
llvm_unreachable("Unsupported register class.");
}
+ int &ScalableByteOffset = RPI.Type == RegPairInfo::PPR && SplitPPRs
+ ? PPRByteOffset
+ : ZPRByteOffset;
+
// Add the stack hazard size as we transition from GPR->FPR CSRs.
- if (AFI->hasStackHazardSlotIndex() &&
+ if (HasCSHazardPadding &&
(!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) &&
AArch64InstrInfo::isFpOrNEON(RPI.Reg1))
ByteOffset += StackFillDir * StackHazardSize;
@@ -1656,7 +1737,7 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL,
int Scale = TRI->getSpillSize(*RPI.RC);
// Add the next reg to the pair if it is in the same register class.
- if (unsigned(i + RegInc) < Count && !AFI->hasStackHazardSlotIndex()) {
+ if (unsigned(i + RegInc) < Count && !HasCSHazardPadding) {
MCRegister NextReg = CSI[i + RegInc].getReg();
bool IsFirst = i == FirstReg;
switch (RPI.Type) {
@@ -2021,10 +2102,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
}
// Update the StackIDs of the SVE stack slots.
MachineFrameInfo &MFI = MF.getFrameInfo();
- if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
+ if (RPI.Type == RegPairInfo::ZPR) {
MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
if (RPI.isPaired())
MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector);
+ } else if (RPI.Type == RegPairInfo::PPR) {
+ MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredicateVector);
+ if (RPI.isPaired())
+ MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredicateVector);
}
}
return true;
@@ -2199,6 +2284,13 @@ static std::optional<int> getLdStFrameID(const MachineInstr &MI,
return getMMOFrameID(*MI.memoperands_begin(), MFI);
}
+// Returns true if the LDST MachineInstr \p MI is a PPR access.
+static bool isPPRAccess(const MachineInstr &MI) {
+ return MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
+ MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO &&
+ AArch64::PPRRegClass.contains(MI.getOperand(0).getReg());
+}
+
// Check if a Hazard slot is needed for the current function, and if so create
// one for it. The index is stored in AArch64FunctionInfo->StackHazardSlotIndex,
// which can be used to determine if any hazard padding is needed.
@@ -2222,26 +2314,50 @@ void AArch64FrameLowering::determineStackHazardSlot(
bool HasFPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) {
return AArch64::FPR64RegClass.contains(Reg) ||
AArch64::FPR128RegClass.contains(Reg) ||
- AArch64::ZPRRegClass.contains(Reg) ||
- AArch64::PPRRegClass.contains(Reg);
+ AArch64::ZPRRegClass.contains(Reg);
+ });
+ bool HasPPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) {
+ return AArch64::PPRRegClass.contains(Reg);
});
bool HasFPRStackObjects = false;
- if (!HasFPRCSRs) {
- std::vector<unsigned> FrameObjects(MFI.getObjectIndexEnd());
+ bool HasPPRStackObjects = false;
+ if (!HasFPRCSRs || SplitSVEObjects) {
+ enum SlotType : uint8_t {
+ Unknown = 0,
+ ZPRorFPR = 1 << 0,
+ PPR = 1 << 1,
+ GPR = 1 << 2,
+ LLVM_MARK_AS_BITMASK_ENUM(GPR)
+ };
+
+ // Find stack slots solely used for one kind of register (ZPR, PPR, etc.),
+ // based on the kinds of accesses used in the function.
+ SmallVector<SlotType> SlotTypes(MFI.getObjectIndexEnd(), SlotType::Unknown);
for (auto &MBB : MF) {
for (auto &MI : MBB) {
std::optional<int> FI = getLdStFrameID(MI, MFI);
- if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
- if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
- AArch64InstrInfo::isFpOrNEON(MI))
- FrameObjects[*FI] |= 2;
- else
- FrameObjects[*FI] |= 1;
+      if (!FI || *FI < 0 || *FI >= int(SlotTypes.size()))
+ continue;
+ if (MFI.hasScalableStackID(*FI)) {
+ SlotTypes[*FI] |=
+ isPPRAccess(MI) ? SlotType::PPR : SlotType::ZPRorFPR;
+ } else {
+ SlotTypes[*FI] |= AArch64InstrInfo::isFpOrNEON(MI)
+ ? SlotType::ZPRorFPR
+ : SlotType::GPR;
}
}
}
- HasFPRStackObjects =
- any_of(FrameObjects, [](unsigned B) { return (B & 3) == 2; });
+
+ for (int FI = 0; FI < int(SlotTypes.size()); ++FI) {
+ HasFPRStackObjects |= SlotTypes[FI] == SlotType::ZPRorFPR;
+      // For SplitSVEObjects, remember that this stack slot is a predicate;
+      // this will be needed later when determining the frame layout.
+ if (SlotTypes[FI] == SlotType::PPR) {
+ MFI.setStackID(FI, TargetStackID::ScalablePredicateVector);
+ HasPPRStackObjects = true;
+ }
+ }
}
if (HasFPRCSRs || HasFPRStackObjects) {
@@ -2250,6 +2366,78 @@ void AArch64FrameLowering::determineStackHazardSlot(
<< StackHazardSize << "\n");
AFI->setStackHazardSlotIndex(ID);
}
+
+ // Determine if we should use SplitSVEObjects. This should only be used if
+ // there's a possibility of a stack hazard between PPRs and ZPRs or FPRs.
+ if (SplitSVEObjects) {
+ if (!HasPPRCSRs && !HasPPRStackObjects) {
+ LLVM_DEBUG(
+ dbgs() << "Not using SplitSVEObjects as no PPRs are on the stack\n");
+ return;
+ }
+
+ if (!HasFPRCSRs && !HasFPRStackObjects) {
+ LLVM_DEBUG(
+ dbgs()
+ << "Not using SplitSVEObjects as no FPRs or ZPRs are on the stack\n");
+ return;
+ }
+
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ if (MFI.hasVarSizedObjects() || TRI->hasStackRealignment(MF)) {
+ LLVM_DEBUG(dbgs() << "SplitSVEObjects is not supported with variable "
+ "sized objects or realignment\n");
+ return;
+ }
+
+ if (arePPRsSpilledAsZPR(MF)) {
+ LLVM_DEBUG(dbgs() << "SplitSVEObjects is not supported with "
+                        "-aarch64-enable-zpr-predicate-spills\n");
+ return;
+ }
+
+    // If another calling convention is explicitly set, FPRs can't be
+    // promoted to ZPR callee-saves.
+ if (!is_contained({CallingConv::C, CallingConv::Fast,
+ CallingConv::AArch64_SVE_VectorCall},
+ MF.getFunction().getCallingConv())) {
+ LLVM_DEBUG(
+          dbgs() << "Calling convention is not supported with SplitSVEObjects\n");
+ return;
+ }
+
+ [[maybe_unused]] const AArch64Subtarget &Subtarget =
+ MF.getSubtarget<AArch64Subtarget>();
+ assert(Subtarget.isSVEorStreamingSVEAvailable() &&
+ "Expected SVE to be available for PPRs");
+
+    // With SplitSVEObjects the CS hazard padding is placed between the PPRs
+    // and ZPRs. If there were any FPR CSRs, there would be a hazard between
+    // them and the GPR CSRs; avoid this by promoting all FPR CSRs to ZPRs.
+ BitVector FPRZRegs(SavedRegs.size());
+ for (size_t Reg = 0, E = SavedRegs.size(); HasFPRCSRs && Reg < E; ++Reg) {
+ BitVector::reference RegBit = SavedRegs[Reg];
+ if (!RegBit)
+ continue;
+ unsigned SubRegIdx = 0;
+ if (AArch64::FPR64RegClass.contains(Reg))
+ SubRegIdx = AArch64::dsub;
+ else if (AArch64::FPR128RegClass.contains(Reg))
+ SubRegIdx = AArch64::zsub;
+ else
+ continue;
+ // Clear the bit for the FPR save.
+ RegBit = false;
+ // Mark that we should save the corresponding ZPR.
+ Register ZReg =
+ TRI->getMatchingSuperReg(Reg, SubRegIdx, &AArch64::ZPRRegClass);
+ FPRZRegs.set(ZReg);
+ }
+ SavedRegs |= FPRZRegs;
+
+ AFI->setSplitSVEObjects(true);
+ LLVM_DEBUG(dbgs() << "SplitSVEObjects enabled!\n");
+ }
}
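The SlotType enum in this function uses LLVM's bitmask-enum facility: LLVM_MARK_AS_BITMASK_ENUM names the largest bit so that |, & and |= apply directly to the enum. A reduced sketch of the mechanism (invented names):

    #include <cstdint>
    #include "llvm/ADT/BitmaskEnum.h"
    using namespace llvm;

    enum SlotKind : uint8_t {
      SK_None = 0,
      SK_Vec = 1 << 0,
      SK_Pred = 1 << 1,
      SK_Gpr = 1 << 2,
      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/SK_Gpr)
    };

    static bool isPredOnly(bool SawPred, bool SawVec) {
      SlotKind K = SK_None;
      if (SawPred)
        K |= SK_Pred; // operators come from BitmaskEnum.h
      if (SawVec)
        K |= SK_Vec;
      return K == SK_Pred;
    }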
void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
@@ -2260,10 +2448,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
+ const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
MF.getSubtarget().getRegisterInfo());
- const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
unsigned UnspilledCSGPR = AArch64::NoRegister;
unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
@@ -2382,17 +2571,26 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
SavedRegs.set(AArch64::X18);
}
+ // Determine if a Hazard slot should be used and where it should go.
+ // If SplitSVEObjects is used, the hazard padding is placed between the PPRs
+ // and ZPRs. Otherwise, it goes in the callee save area.
+ determineStackHazardSlot(MF, SavedRegs);
+
// Calculates the callee saved stack size.
unsigned CSStackSize = 0;
- unsigned SVECSStackSize = 0;
+ unsigned ZPRCSStackSize = 0;
+ unsigned PPRCSStackSize = 0;
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
for (unsigned Reg : SavedRegs.set_bits()) {
auto *RC = TRI->getMinimalPhysRegClass(Reg);
assert(RC && "expected register class!");
auto SpillSize = TRI->getSpillSize(*RC);
- if (AArch64::PPRRegClass.contains(Reg) ||
- AArch64::ZPRRegClass.contains(Reg))
- SVECSStackSize += SpillSize;
+ bool IsZPR = AArch64::ZPRRegClass.contains(Reg);
+ bool IsPPR = !IsZPR && AArch64::PPRRegClass.contains(Reg);
+ if (IsZPR || (IsPPR && arePPRsSpilledAsZPR(MF)))
+ ZPRCSStackSize += SpillSize;
+ else if (IsPPR)
+ PPRCSStackSize += SpillSize;
else
CSStackSize += SpillSize;
}
@@ -2402,17 +2600,15 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// only 64-bit GPRs can be added to SavedRegs.
unsigned NumSavedRegs = SavedRegs.count();
+  // If we have hazard padding in the CS area, add that to the size.
+ if (AFI->isStackHazardIncludedInCalleeSaveArea())
+ CSStackSize += getStackHazardSize(MF);
+
// Increase the callee-saved stack size if the function has streaming mode
// changes, as we will need to spill the value of the VG register.
if (requiresSaveVG(MF))
CSStackSize += 8;
- // Determine if a Hazard slot should be used, and increase the CSStackSize by
- // StackHazardSize if so.
- determineStackHazardSlot(MF, SavedRegs);
- if (AFI->hasStackHazardSlotIndex())
- CSStackSize += getStackHazardSize(MF);
-
// If we must call __arm_get_current_vg in the prologue preserve the LR.
if (requiresSaveVG(MF) && !Subtarget.hasSVE())
SavedRegs.set(AArch64::LR);
@@ -2433,8 +2629,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
});
// If any callee-saved registers are used, the frame cannot be eliminated.
- int64_t SVEStackSize =
- alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
+ auto [ZPRLocalStackSize, PPRLocalStackSize] =
+ determineSVEStackSizes(MF, AssignObjectOffsets::No);
+ uint64_t SVELocals = ZPRLocalStackSize + PPRLocalStackSize;
+ uint64_t SVEStackSize =
+ alignTo(ZPRCSStackSize + PPRCSStackSize + SVELocals, 16);
bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;
// The CSR spill slots have not been allocated yet, so estimateStackSize
@@ -2519,7 +2718,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// instructions.
AFI->setCalleeSavedStackSize(AlignedCSStackSize);
AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
- AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
+ AFI->setSVECalleeSavedStackSize(ZPRCSStackSize, alignTo(PPRCSStackSize, 16));
}
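As a worked example of the split accounting above (assuming the usual SVE spill sizes of 16 scalable bytes per Z register and 2 per P register): a function whose SVE callee saves are Z8, Z9 and P4 ends up with ZPRCSStackSize = 32 and PPRCSStackSize = 2, and the final call records setSVECalleeSavedStackSize(32, alignTo(2, 16)), i.e. a 32-byte ZPR area and a 16-byte-aligned PPR area.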
bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
@@ -2572,7 +2771,7 @@ bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
// Create a hazard slot as we switch between GPR and FPR CSRs.
- if (AFI->hasStackHazardSlotIndex() &&
+ if (AFI->isStackHazardIncludedInCalleeSaveArea() &&
(!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) &&
AArch64InstrInfo::isFpOrNEON(Reg)) {
assert(HazardSlotIndex == std::numeric_limits<int>::max() &&
@@ -2611,7 +2810,7 @@ bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
}
// Add hazard slot in the case where no FPR CSRs are present.
- if (AFI->hasStackHazardSlotIndex() &&
+ if (AFI->isStackHazardIncludedInCalleeSaveArea() &&
HazardSlotIndex == std::numeric_limits<int>::max()) {
HazardSlotIndex = MFI.CreateStackObject(StackHazardSize, Align(8), true);
LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
@@ -2658,7 +2857,6 @@ static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI,
assert((Max == std::numeric_limits<int>::min() ||
Max + 1 == CS.getFrameIdx()) &&
"SVE CalleeSaves are not consecutive");
-
Min = std::min(Min, CS.getFrameIdx());
Max = std::max(Max, CS.getFrameIdx());
}
@@ -2666,43 +2864,64 @@ static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI,
return Min != std::numeric_limits<int>::max();
}
-// Process all the SVE stack objects and determine offsets for each
-// object. If AssignOffsets is true, the offsets get assigned.
-// Fills in the first and last callee-saved frame indices into
-// Min/MaxCSFrameIndex, respectively.
-// Returns the size of the stack.
-static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
- int &MinCSFrameIndex,
- int &MaxCSFrameIndex,
- bool AssignOffsets) {
+static SVEStackSizes determineSVEStackSizes(MachineFunction &MF,
+ AssignObjectOffsets AssignOffsets) {
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ auto *AFI = MF.getInfo<AArch64FunctionInfo>();
+
+ SVEStackSizes SVEStack{};
+
+ // With SplitSVEObjects we maintain separate stack offsets for predicates
+  // (PPRs) and SVE vectors (ZPRs). When SplitSVEObjects is disabled,
+  // predicates are included in the SVE vector area.
+ uint64_t &ZPRStackTop = SVEStack.ZPRStackSize;
+ uint64_t &PPRStackTop =
+ AFI->hasSplitSVEObjects() ? SVEStack.PPRStackSize : SVEStack.ZPRStackSize;
+
#ifndef NDEBUG
// First process all fixed stack objects.
for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
- assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
+ assert(!MFI.hasScalableStackID(I) &&
"SVE vectors should never be passed on the stack by value, only by "
"reference.");
#endif
- auto Assign = [&MFI](int FI, int64_t Offset) {
+ auto AllocateObject = [&](int FI) {
+ uint64_t &StackTop = MFI.getStackID(FI) == TargetStackID::ScalableVector
+ ? ZPRStackTop
+ : PPRStackTop;
+
+ // FIXME: Given that the length of SVE vectors is not necessarily a power of
+ // two, we'd need to align every object dynamically at runtime if the
+ // alignment is larger than 16. This is not yet supported.
+ Align Alignment = MFI.getObjectAlign(FI);
+ if (Alignment > Align(16))
+ report_fatal_error(
+ "Alignment of scalable vectors > 16 bytes is not yet supported");
+
+ StackTop += MFI.getObjectSize(FI);
+ StackTop = alignTo(StackTop, Alignment);
+
+ assert(StackTop < std::numeric_limits<int64_t>::max() &&
+ "SVE StackTop far too large?!");
+
+ int64_t Offset = -int64_t(StackTop);
+ if (AssignOffsets == AssignObjectOffsets::Yes)
+ MFI.setObjectOffset(FI, Offset);
+
LLVM_DEBUG(dbgs() << "alloc FI(" << FI << ") at SP[" << Offset << "]\n");
- MFI.setObjectOffset(FI, Offset);
};
- int64_t Offset = 0;
-
// Then process all callee saved slots.
+ int MinCSFrameIndex, MaxCSFrameIndex;
if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) {
- // Assign offsets to the callee save slots.
- for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) {
- Offset += MFI.getObjectSize(I);
- Offset = alignTo(Offset, MFI.getObjectAlign(I));
- if (AssignOffsets)
- Assign(I, -Offset);
- }
+ for (int FI = MinCSFrameIndex; FI <= MaxCSFrameIndex; ++FI)
+ AllocateObject(FI);
}
- // Ensure that the Callee-save area is aligned to 16bytes.
- Offset = alignTo(Offset, Align(16U));
+ // Ensure the CS area is 16-byte aligned.
+ PPRStackTop = alignTo(PPRStackTop, Align(16U));
+ ZPRStackTop = alignTo(ZPRStackTop, Align(16U));
// Create a buffer of SVE objects to allocate and sort it.
SmallVector<int, 8> ObjectsToAllocate;
@@ -2715,48 +2934,31 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
ObjectsToAllocate.push_back(StackProtectorFI);
}
- for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
- unsigned StackID = MFI.getStackID(I);
- if (StackID != TargetStackID::ScalableVector)
- continue;
- if (I == StackProtectorFI)
+
+ for (int FI = 0, E = MFI.getObjectIndexEnd(); FI != E; ++FI) {
+ if (FI == StackProtectorFI || MFI.isDeadObjectIndex(FI))
continue;
- if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
+ if (MaxCSFrameIndex >= FI && FI >= MinCSFrameIndex)
continue;
- if (MFI.isDeadObjectIndex(I))
+
+ if (MFI.getStackID(FI) != TargetStackID::ScalableVector &&
+ MFI.getStackID(FI) != TargetStackID::ScalablePredicateVector)
continue;
- ObjectsToAllocate.push_back(I);
+ ObjectsToAllocate.push_back(FI);
}
// Allocate all SVE locals and spills
- for (unsigned FI : ObjectsToAllocate) {
- Align Alignment = MFI.getObjectAlign(FI);
- // FIXME: Given that the length of SVE vectors is not necessarily a power of
- // two, we'd need to align every object dynamically at runtime if the
- // alignment is larger than 16. This is not yet supported.
- if (Alignment > Align(16))
- report_fatal_error(
- "Alignment of scalable vectors > 16 bytes is not yet supported");
-
- Offset = alignTo(Offset + MFI.getObjectSize(FI), Alignment);
- if (AssignOffsets)
- Assign(FI, -Offset);
- }
+ for (unsigned FI : ObjectsToAllocate)
+ AllocateObject(FI);
- return Offset;
-}
+ PPRStackTop = alignTo(PPRStackTop, Align(16U));
+ ZPRStackTop = alignTo(ZPRStackTop, Align(16U));
-int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
- MachineFrameInfo &MFI) const {
- int MinCSFrameIndex, MaxCSFrameIndex;
- return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex, false);
-}
+ if (AssignOffsets == AssignObjectOffsets::Yes)
+ AFI->setStackSizeSVE(SVEStack.ZPRStackSize, SVEStack.PPRStackSize);
-int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
- MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const {
- return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
- true);
+ return SVEStack;
}
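AllocateObject above is a top-down bump allocator: the object's size is added to the relevant stack top, the top is rounded up to the object's alignment, and the negated top becomes the object's offset below SP. A self-contained sketch of the same scheme (illustrative names, not LLVM API):

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint64_t alignTo(uint64_t Value, uint64_t Alignment) {
  return (Value + Alignment - 1) / Alignment * Alignment;
}

int main() {
  uint64_t StackTop = 0; // the area grows downwards from offset 0
  struct { uint64_t Size, Align; } Objects[] = {{16, 16}, {2, 2}, {32, 16}};
  for (auto &Obj : Objects) {
    assert(Obj.Align <= 16 && "larger alignments need dynamic realignment");
    StackTop = alignTo(StackTop + Obj.Size, Obj.Align);
    std::printf("alloc at SP[%lld]\n", -(long long)StackTop); // -16, -18, -64
  }
  StackTop = alignTo(StackTop, 16); // final 16-byte alignment of the area
}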
/// Attempts to scavenge a register from \p ScavengeableRegs given the used
@@ -3070,12 +3272,7 @@ void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
"Upwards growing stack unsupported");
- int MinCSFrameIndex, MaxCSFrameIndex;
- int64_t SVEStackSize =
- assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);
-
- AFI->setStackSizeSVE(alignTo(SVEStackSize, 16U));
- AFI->setMinMaxSVECSFrameIndex(MinCSFrameIndex, MaxCSFrameIndex);
+ (void)determineSVEStackSizes(MF, AssignObjectOffsets::Yes);
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
@@ -3359,7 +3556,8 @@ void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
Register Reg;
FrameRegOffset = TFI->resolveFrameOffsetReference(
- *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg,
+ *MF, FirstTagStore.Offset, false /*isFixed*/,
+ TargetStackID::Default /*StackID*/, Reg,
/*PreferFP=*/false, /*ForSimm=*/true);
FrameReg = Reg;
FrameRegUpdate = std::nullopt;
@@ -3597,7 +3795,7 @@ StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
// Go to common code if we cannot provide sp + offset.
if (MFI.hasVarSizedObjects() ||
- MF.getInfo<AArch64FunctionInfo>()->getStackSizeSVE() ||
+ MF.getInfo<AArch64FunctionInfo>()->hasSVEStackSize() ||
MF.getSubtarget().getRegisterInfo()->hasStackRealignment(MF))
return getFrameIndexReference(MF, FI, FrameReg);
@@ -3699,10 +3897,12 @@ bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
void AArch64FrameLowering::orderFrameObjects(
const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
- if (!OrderFrameObjects || ObjectsToAllocate.empty())
+ const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
+
+ if ((!OrderFrameObjects && !AFI.hasSplitSVEObjects()) ||
+ ObjectsToAllocate.empty())
return;
- const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
const MachineFrameInfo &MFI = MF.getFrameInfo();
std::vector<FrameObject> FrameObjects(MFI.getObjectIndexEnd());
for (auto &Obj : ObjectsToAllocate) {
@@ -4080,7 +4280,7 @@ void AArch64FrameLowering::emitRemarks(
}
unsigned RegTy = StackAccess::AccessType::GPR;
- if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
+ if (MFI.hasScalableStackID(FrameIdx)) {
// SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
// spill/fill the predicate as a data vector (so are an FPR access).
if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index 7bba053..32a9bd8 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -24,6 +24,11 @@ class AArch64FunctionInfo;
class AArch64PrologueEmitter;
class AArch64EpilogueEmitter;
+struct SVEStackSizes {
+ uint64_t ZPRStackSize{0};
+ uint64_t PPRStackSize{0};
+};
+
class AArch64FrameLowering : public TargetFrameLowering {
public:
explicit AArch64FrameLowering()
@@ -64,8 +69,9 @@ public:
bool ForSimm) const;
StackOffset resolveFrameOffsetReference(const MachineFunction &MF,
int64_t ObjectOffset, bool isFixed,
- bool isSVE, Register &FrameReg,
- bool PreferFP, bool ForSimm) const;
+ TargetStackID::Value StackID,
+ Register &FrameReg, bool PreferFP,
+ bool ForSimm) const;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
ArrayRef<CalleeSavedInfo> CSI,
@@ -124,6 +130,7 @@ public:
return false;
case TargetStackID::Default:
case TargetStackID::ScalableVector:
+ case TargetStackID::ScalablePredicateVector:
case TargetStackID::NoAlloc:
return true;
}
@@ -132,7 +139,8 @@ public:
bool isStackIdSafeForLocalArea(unsigned StackId) const override {
// We don't support putting SVE objects into the pre-allocated local
// frame block at the moment.
- return StackId != TargetStackID::ScalableVector;
+ return (StackId != TargetStackID::ScalableVector &&
+ StackId != TargetStackID::ScalablePredicateVector);
}
void
@@ -145,7 +153,17 @@ public:
bool requiresSaveVG(const MachineFunction &MF) const;
- StackOffset getSVEStackSize(const MachineFunction &MF) const;
+  /// Returns the size of the entire ZPR stack frame (callee saves + spills).
+ StackOffset getZPRStackSize(const MachineFunction &MF) const;
+
+  /// Returns the size of the entire PPR stack frame (callee saves + spills +
+  /// hazard padding).
+ StackOffset getPPRStackSize(const MachineFunction &MF) const;
+
+  /// Returns the size of the entire SVE stack frame (PPRs + ZPRs).
+ StackOffset getSVEStackSize(const MachineFunction &MF) const {
+ return getZPRStackSize(MF) + getPPRStackSize(MF);
+ }
friend class AArch64PrologueEpilogueCommon;
friend class AArch64PrologueEmitter;
@@ -165,10 +183,6 @@ private:
/// Returns true if CSRs should be paired.
bool producePairRegisters(MachineFunction &MF) const;
- int64_t estimateSVEStackObjectOffsets(MachineFrameInfo &MF) const;
- int64_t assignSVEStackObjectOffsets(MachineFrameInfo &MF,
- int &MinCSFrameIndex,
- int &MaxCSFrameIndex) const;
  /// Determine whether a Hazard slot is used and create it if needed.
void determineStackHazardSlot(MachineFunction &MF,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 6a1b06e..e7b2d20 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -2089,7 +2089,8 @@ void AArch64DAGToDAGISel::SelectMultiVectorLutiLane(SDNode *Node,
if (!ImmToReg<AArch64::ZT0, 0>(Node->getOperand(2), ZtValue))
return;
- SDValue Ops[] = {ZtValue, Node->getOperand(3), Node->getOperand(4)};
+ SDValue Chain = Node->getOperand(0);
+ SDValue Ops[] = {ZtValue, Node->getOperand(3), Node->getOperand(4), Chain};
SDLoc DL(Node);
EVT VT = Node->getValueType(0);
@@ -2110,14 +2111,15 @@ void AArch64DAGToDAGISel::SelectMultiVectorLutiLane(SDNode *Node,
void AArch64DAGToDAGISel::SelectMultiVectorLuti(SDNode *Node,
unsigned NumOutVecs,
unsigned Opc) {
-
SDValue ZtValue;
- SmallVector<SDValue, 4> Ops;
if (!ImmToReg<AArch64::ZT0, 0>(Node->getOperand(2), ZtValue))
return;
- Ops.push_back(ZtValue);
- Ops.push_back(createZMulTuple({Node->getOperand(3), Node->getOperand(4)}));
+ SDValue Chain = Node->getOperand(0);
+ SDValue Ops[] = {ZtValue,
+ createZMulTuple({Node->getOperand(3), Node->getOperand(4)}),
+ Chain};
+
SDLoc DL(Node);
EVT VT = Node->getValueType(0);
@@ -7495,7 +7497,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
int FI = cast<FrameIndexSDNode>(N)->getIndex();
// We can only encode VL scaled offsets, so only fold in frame indexes
// referencing SVE objects.
- if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+ if (MFI.hasScalableStackID(FI)) {
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
return true;
@@ -7541,7 +7543,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
// We can only encode VL scaled offsets, so only fold in frame indexes
// referencing SVE objects.
- if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
+ if (MFI.hasScalableStackID(FI))
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
}
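Both checks collapse into MFI.hasScalableStackID(FI), which covers the data-vector and the new predicate stack ID at once. The semantics assumed here, as a sketch (enum values illustrative, not LLVM's):

enum class StackID { Default, ScalableVector, ScalablePredicateVector, NoAlloc };

// A frame index is "scalable" if it lives in either SVE area.
static bool hasScalableStackID(StackID ID) {
  return ID == StackID::ScalableVector ||
         ID == StackID::ScalablePredicateVector;
}

int main() { return hasScalableStackID(StackID::ScalablePredicateVector) ? 0 : 1; }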
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 45f5235..70d5ad7d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1537,6 +1537,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT, VT, Custom);
setOperationAction(ISD::FP_TO_SINT, VT, Custom);
setOperationAction(ISD::MLOAD, VT, Custom);
+ setOperationAction(ISD::MSTORE, VT, Legal);
setOperationAction(ISD::MUL, VT, Custom);
setOperationAction(ISD::MULHS, VT, Custom);
setOperationAction(ISD::MULHU, VT, Custom);
@@ -6617,7 +6618,6 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
"llvm.eh.recoverfp must take a function as the first argument");
return IncomingFPOp;
}
-
case Intrinsic::aarch64_neon_vsri:
case Intrinsic::aarch64_neon_vsli:
case Intrinsic::aarch64_sve_sri:
@@ -9256,8 +9256,7 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
(MI.getOpcode() == AArch64::ADDXri ||
MI.getOpcode() == AArch64::SUBXri)) {
const MachineOperand &MO = MI.getOperand(1);
- if (MO.isFI() && MF.getFrameInfo().getStackID(MO.getIndex()) ==
- TargetStackID::ScalableVector)
+ if (MO.isFI() && MF.getFrameInfo().hasScalableStackID(MO.getIndex()))
MI.addOperand(MachineOperand::CreateReg(AArch64::VG, /*IsDef=*/false,
/*IsImplicit=*/true));
}
@@ -9704,8 +9703,12 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
MachineFrameInfo &MFI = MF.getFrameInfo();
int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
- if (isScalable)
- MFI.setStackID(FI, TargetStackID::ScalableVector);
+ if (isScalable) {
+ bool IsPred = VA.getValVT() == MVT::aarch64svcount ||
+ VA.getValVT().getVectorElementType() == MVT::i1;
+ MFI.setStackID(FI, IsPred ? TargetStackID::ScalablePredicateVector
+ : TargetStackID::ScalableVector);
+ }
MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
SDValue Ptr = DAG.getFrameIndex(
@@ -15154,9 +15157,7 @@ static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
: Shift.getOperand(1);
unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
- SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Imm);
-
- return ResultSLI;
+ return DAG.getNode(Inst, DL, VT, X, Y, Imm);
}
static SDValue tryLowerToBSL(SDValue N, SelectionDAG &DAG) {
@@ -27234,6 +27235,21 @@ static bool isLanes1toNKnownZero(SDValue Op) {
}
}
+// Return true if the vector operation can guarantee that the first lane of its
+// result is active.
+static bool isLane0KnownActive(SDValue Op) {
+ switch (Op.getOpcode()) {
+ default:
+ return false;
+ case AArch64ISD::REINTERPRET_CAST:
+ return isLane0KnownActive(Op->getOperand(0));
+ case ISD::SPLAT_VECTOR:
+ return isOneConstant(Op.getOperand(0));
+ case AArch64ISD::PTRUE:
+ return Op.getConstantOperandVal(0) == AArch64SVEPredPattern::all;
+  }
+}
+
static SDValue removeRedundantInsertVectorElt(SDNode *N) {
assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
SDValue InsertVec = N->getOperand(0);
@@ -27519,6 +27535,32 @@ static SDValue performMULLCombine(SDNode *N,
return SDValue();
}
+static SDValue performPTestFirstCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ if (DCI.isBeforeLegalize())
+ return SDValue();
+
+ SDLoc DL(N);
+ auto Mask = N->getOperand(0);
+ auto Pred = N->getOperand(1);
+
+ if (!isLane0KnownActive(Mask))
+ return SDValue();
+
+ if (Pred->getOpcode() == AArch64ISD::REINTERPRET_CAST)
+ Pred = Pred->getOperand(0);
+
+ if (Pred->getOpcode() == ISD::CONCAT_VECTORS) {
+ Pred = Pred->getOperand(0);
+ Pred = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pred);
+ return DAG.getNode(AArch64ISD::PTEST_FIRST, DL, N->getValueType(0), Mask,
+ Pred);
+ }
+
+ return SDValue();
+}
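Worked example of the combine above: with Mask = ptrue(all), lane 0 is known active, so ptest_first(Mask, reinterpret(concat(P0, P1))) depends only on lane 0 of P0 and is rewritten to ptest_first(Mask, reinterpret(P0)); the second half of the concatenated predicate can never influence the 'first' condition flags.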
+
static SDValue
performScalarToVectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) {
@@ -27875,6 +27917,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
case AArch64ISD::UMULL:
case AArch64ISD::PMULL:
return performMULLCombine(N, DCI, DAG);
+ case AArch64ISD::PTEST_FIRST:
+ return performPTestFirstCombine(N, DCI, DAG);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
switch (N->getConstantOperandVal(1)) {
@@ -29564,7 +29608,7 @@ void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
// than doing it here in finalizeLowering.
if (MFI.hasStackProtectorIndex()) {
for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
- if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
+ if (MFI.hasScalableStackID(i) &&
MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
MFI.setStackID(MFI.getStackProtectorIndex(),
TargetStackID::ScalableVector);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index f07d351..6ef0a95 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -10176,28 +10176,6 @@ multiclass SIMDScalarLShiftBHSD<bit U, bits<5> opc, string asm,
(!cast<Instruction>(NAME # "d") FPR64:$Rn, vecshiftL64:$imm)>;
}
-multiclass SIMDScalarRShiftBHSD<bit U, bits<5> opc, string asm> {
- def b : BaseSIMDScalarShift<U, opc, {0,0,0,1,?,?,?},
- FPR8, FPR8, vecshiftR8, asm, []> {
- let Inst{18-16} = imm{2-0};
- }
-
- def h : BaseSIMDScalarShift<U, opc, {0,0,1,?,?,?,?},
- FPR16, FPR16, vecshiftR16, asm, []> {
- let Inst{19-16} = imm{3-0};
- }
-
- def s : BaseSIMDScalarShift<U, opc, {0,1,?,?,?,?,?},
- FPR32, FPR32, vecshiftR32, asm, []> {
- let Inst{20-16} = imm{4-0};
- }
-
- def d : BaseSIMDScalarShift<U, opc, {1,?,?,?,?,?,?},
- FPR64, FPR64, vecshiftR64, asm, []> {
- let Inst{21-16} = imm{5-0};
- }
-}
-
//----------------------------------------------------------------------------
// AdvSIMD vector x indexed element
//----------------------------------------------------------------------------
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5a51c81..5a90da1 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1503,6 +1503,13 @@ AArch64InstrInfo::canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
getElementSizeForOpcode(PredOpcode))
return PredOpcode;
+ // For PTEST_FIRST(PTRUE_ALL, WHILE), the PTEST_FIRST is redundant since
+ // WHILEcc performs an implicit PTEST with an all active mask, setting
+ // the N flag as the PTEST_FIRST would.
+ if (PTest->getOpcode() == AArch64::PTEST_PP_FIRST &&
+ isPTrueOpcode(MaskOpcode) && Mask->getOperand(1).getImm() == 31)
+ return PredOpcode;
+
return {};
}
@@ -5592,7 +5599,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
assert(Subtarget.isSVEorStreamingSVEAvailable() &&
"Unexpected register store without SVE store instructions");
Opc = AArch64::STR_PXI;
- StackID = TargetStackID::ScalableVector;
+ StackID = TargetStackID::ScalablePredicateVector;
}
break;
}
@@ -5607,7 +5614,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
Opc = AArch64::STRSui;
else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
Opc = AArch64::STR_PPXI;
- StackID = TargetStackID::ScalableVector;
+ StackID = TargetStackID::ScalablePredicateVector;
}
break;
case 8:
@@ -5777,7 +5784,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
if (IsPNR)
PNRReg = DestReg;
Opc = AArch64::LDR_PXI;
- StackID = TargetStackID::ScalableVector;
+ StackID = TargetStackID::ScalablePredicateVector;
}
break;
}
@@ -5792,7 +5799,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
Opc = AArch64::LDRSui;
else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
Opc = AArch64::LDR_PPXI;
- StackID = TargetStackID::ScalableVector;
+ StackID = TargetStackID::ScalablePredicateVector;
}
break;
case 8:
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
index a81f5b3..b3c9656 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
@@ -23,12 +23,21 @@
using namespace llvm;
+static std::optional<uint64_t>
+getSVEStackSize(const AArch64FunctionInfo &MFI,
+ uint64_t (AArch64FunctionInfo::*GetStackSize)() const) {
+ if (!MFI.hasCalculatedStackSizeSVE())
+ return std::nullopt;
+ return (MFI.*GetStackSize)();
+}
+
yaml::AArch64FunctionInfo::AArch64FunctionInfo(
const llvm::AArch64FunctionInfo &MFI)
: HasRedZone(MFI.hasRedZone()),
- StackSizeSVE(MFI.hasCalculatedStackSizeSVE()
- ? std::optional<uint64_t>(MFI.getStackSizeSVE())
- : std::nullopt),
+ StackSizeZPR(
+ getSVEStackSize(MFI, &llvm::AArch64FunctionInfo::getStackSizeZPR)),
+ StackSizePPR(
+ getSVEStackSize(MFI, &llvm::AArch64FunctionInfo::getStackSizePPR)),
HasStackFrame(MFI.hasStackFrame()
? std::optional<bool>(MFI.hasStackFrame())
: std::nullopt) {}
@@ -41,8 +50,9 @@ void AArch64FunctionInfo::initializeBaseYamlFields(
const yaml::AArch64FunctionInfo &YamlMFI) {
if (YamlMFI.HasRedZone)
HasRedZone = YamlMFI.HasRedZone;
- if (YamlMFI.StackSizeSVE)
- setStackSizeSVE(*YamlMFI.StackSizeSVE);
+ if (YamlMFI.StackSizeZPR || YamlMFI.StackSizePPR)
+ setStackSizeSVE(YamlMFI.StackSizeZPR.value_or(0),
+ YamlMFI.StackSizePPR.value_or(0));
if (YamlMFI.HasStackFrame)
setHasStackFrame(*YamlMFI.HasStackFrame);
}
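The constructor above threads a pointer-to-member-function through one helper rather than duplicating the hasCalculatedStackSizeSVE() guard for each of the two sizes. The same pattern in miniature (illustrative types, not the LLVM ones):

#include <cstdint>
#include <cstdio>
#include <optional>

struct FunctionInfo {
  bool Calculated = false;
  uint64_t ZPR = 0, PPR = 0;
  bool hasCalculated() const { return Calculated; }
  uint64_t getZPR() const { return ZPR; }
  uint64_t getPPR() const { return PPR; }
};

static std::optional<uint64_t>
getSize(const FunctionInfo &MFI, uint64_t (FunctionInfo::*Get)() const) {
  if (!MFI.hasCalculated())
    return std::nullopt; // the sizes are meaningless before they are computed
  return (MFI.*Get)();
}

int main() {
  FunctionInfo MFI{true, 32, 16};
  std::printf("%llu\n", (unsigned long long)*getSize(MFI, &FunctionInfo::getZPR));
}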
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index 897c7e8..91e64e6 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -74,13 +74,10 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
/// Amount of stack frame size, not including callee-saved registers.
uint64_t LocalStackSize = 0;
- /// The start and end frame indices for the SVE callee saves.
- int MinSVECSFrameIndex = 0;
- int MaxSVECSFrameIndex = 0;
-
/// Amount of stack frame size used for saving callee-saved registers.
unsigned CalleeSavedStackSize = 0;
- unsigned SVECalleeSavedStackSize = 0;
+ unsigned ZPRCalleeSavedStackSize = 0;
+ unsigned PPRCalleeSavedStackSize = 0;
bool HasCalleeSavedStackSize = false;
bool HasSVECalleeSavedStackSize = false;
@@ -137,9 +134,14 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
  /// SVE stack sizes (for predicates and data vectors) are maintained here
/// rather than in FrameInfo, as the placement and Stack IDs are target
/// specific.
- uint64_t StackSizeSVE = 0;
+ uint64_t StackSizeZPR = 0;
+ uint64_t StackSizePPR = 0;
+
+  /// True if SVE objects (vectors and predicates) are split into separate
+  /// regions on the stack.
+ bool SplitSVEObjects = false;
- /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
+  /// HasCalculatedStackSizeSVE indicates whether StackSizeZPR/PPR are valid.
bool HasCalculatedStackSizeSVE = false;
/// Has a value when it is known whether or not the function uses a
@@ -312,16 +314,25 @@ public:
TailCallReservedStack = bytes;
}
- bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }
-
- void setStackSizeSVE(uint64_t S) {
+ void setStackSizeSVE(uint64_t ZPR, uint64_t PPR) {
+ StackSizeZPR = ZPR;
+ StackSizePPR = PPR;
HasCalculatedStackSizeSVE = true;
- StackSizeSVE = S;
}
- uint64_t getStackSizeSVE() const {
+ uint64_t getStackSizeZPR() const {
+ assert(hasCalculatedStackSizeSVE());
+ return StackSizeZPR;
+ }
+ uint64_t getStackSizePPR() const {
assert(hasCalculatedStackSizeSVE());
- return StackSizeSVE;
+ return StackSizePPR;
+ }
+
+ bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }
+
+ bool hasSVEStackSize() const {
+ return getStackSizeZPR() > 0 || getStackSizePPR() > 0;
}
bool hasStackFrame() const { return HasStackFrame; }
@@ -329,7 +340,6 @@ public:
bool isStackRealigned() const { return StackRealigned; }
void setStackRealigned(bool s) { StackRealigned = s; }
-
bool hasCalleeSaveStackFreeSpace() const {
return CalleeSaveStackHasFreeSpace;
}
@@ -414,29 +424,37 @@ public:
}
// Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
- void setSVECalleeSavedStackSize(unsigned Size) {
- SVECalleeSavedStackSize = Size;
+ void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR) {
+ ZPRCalleeSavedStackSize = ZPR;
+ PPRCalleeSavedStackSize = PPR;
HasSVECalleeSavedStackSize = true;
}
- unsigned getSVECalleeSavedStackSize() const {
+ unsigned getZPRCalleeSavedStackSize() const {
assert(HasSVECalleeSavedStackSize &&
- "SVECalleeSavedStackSize has not been calculated");
- return SVECalleeSavedStackSize;
+ "ZPRCalleeSavedStackSize has not been calculated");
+ return ZPRCalleeSavedStackSize;
}
-
- void setMinMaxSVECSFrameIndex(int Min, int Max) {
- MinSVECSFrameIndex = Min;
- MaxSVECSFrameIndex = Max;
+ unsigned getPPRCalleeSavedStackSize() const {
+ assert(HasSVECalleeSavedStackSize &&
+ "PPRCalleeSavedStackSize has not been calculated");
+ return PPRCalleeSavedStackSize;
}
- int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
- int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }
+ unsigned getSVECalleeSavedStackSize() const {
+ assert(!hasSplitSVEObjects() &&
+ "ZPRs and PPRs are split. Use get[ZPR|PPR]CalleeSavedStackSize()");
+ return getZPRCalleeSavedStackSize() + getPPRCalleeSavedStackSize();
+ }
void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
unsigned getNumLocalDynamicTLSAccesses() const {
return NumLocalDynamicTLSAccesses;
}
+ bool isStackHazardIncludedInCalleeSaveArea() const {
+ return hasStackHazardSlotIndex() && !hasSplitSVEObjects();
+ }
+
std::optional<bool> hasRedZone() const { return HasRedZone; }
void setHasRedZone(bool s) { HasRedZone = s; }
@@ -472,6 +490,15 @@ public:
StackHazardCSRSlotIndex = Index;
}
+ bool hasSplitSVEObjects() const { return SplitSVEObjects; }
+ void setSplitSVEObjects(bool s) { SplitSVEObjects = s; }
+
+ bool hasSVE_AAPCS(const MachineFunction &MF) const {
+ return hasSplitSVEObjects() || isSVECC() ||
+ MF.getFunction().getCallingConv() ==
+ CallingConv::AArch64_SVE_VectorCall;
+ }
+
SMEAttrs getSMEFnAttrs() const { return SMEFnAttrs; }
unsigned getSRetReturnReg() const { return SRetReturnReg; }
@@ -611,7 +638,8 @@ private:
namespace yaml {
struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
std::optional<bool> HasRedZone;
- std::optional<uint64_t> StackSizeSVE;
+ std::optional<uint64_t> StackSizeZPR;
+ std::optional<uint64_t> StackSizePPR;
std::optional<bool> HasStackFrame;
AArch64FunctionInfo() = default;
@@ -624,7 +652,8 @@ struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
template <> struct MappingTraits<AArch64FunctionInfo> {
static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
- YamlIO.mapOptional("stackSizeSVE", MFI.StackSizeSVE);
+ YamlIO.mapOptional("stackSizeZPR", MFI.StackSizeZPR);
+ YamlIO.mapOptional("stackSizePPR", MFI.StackSizePPR);
YamlIO.mapOptional("hasStackFrame", MFI.HasStackFrame);
}
};
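The combined callee-save getter is only meaningful while the ZPR and PPR areas are contiguous; once SplitSVEObjects is set, callers must query the two areas separately, which the assert enforces. A reduced model of that contract (a sketch mirroring the asserts above, not the real class):

#include <cassert>

struct CSSizes {
  unsigned ZPR = 0, PPR = 0;
  bool Split = false;
  unsigned getZPR() const { return ZPR; }
  unsigned getPPR() const { return PPR; }
  unsigned getCombined() const {
    assert(!Split && "areas are split; query ZPR/PPR separately");
    return ZPR + PPR;
  }
};

int main() {
  CSSizes S{32, 16, false};
  unsigned Contiguous = S.getCombined(); // fine: one contiguous SVE CS area
  S.Split = true; // from here on only getZPR()/getPPR() are valid
  return S.getZPR() + S.getPPR() == Contiguous ? 0 : 1;
}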
diff --git a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
index 09b3643..aed137c 100644
--- a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
@@ -48,21 +48,19 @@ bool AArch64PrologueEpilogueCommon::isVGInstruction(
return Opc == TargetOpcode::COPY;
}
-// Convenience function to determine whether I is an SVE callee save.
-static bool isSVECalleeSave(MachineBasicBlock::iterator I) {
+// Convenience function to determine whether I is part of the ZPR callee saves.
+static bool isPartOfZPRCalleeSaves(MachineBasicBlock::iterator I) {
switch (I->getOpcode()) {
default:
return false;
- case AArch64::PTRUE_C_B:
case AArch64::LD1B_2Z_IMM:
case AArch64::ST1B_2Z_IMM:
case AArch64::STR_ZXI:
- case AArch64::STR_PXI:
case AArch64::LDR_ZXI:
- case AArch64::LDR_PXI:
- case AArch64::PTRUE_B:
case AArch64::CPY_ZPzI_B:
case AArch64::CMPNE_PPzZI_B:
+ case AArch64::PTRUE_C_B:
+ case AArch64::PTRUE_B:
return I->getFlag(MachineInstr::FrameSetup) ||
I->getFlag(MachineInstr::FrameDestroy);
case AArch64::SEH_SavePReg:
@@ -71,6 +69,23 @@ static bool isSVECalleeSave(MachineBasicBlock::iterator I) {
}
}
+// Convenience function to determine whether I is part of the PPR callee saves.
+static bool isPartOfPPRCalleeSaves(MachineBasicBlock::iterator I) {
+ switch (I->getOpcode()) {
+ default:
+ return false;
+ case AArch64::STR_PXI:
+ case AArch64::LDR_PXI:
+ return I->getFlag(MachineInstr::FrameSetup) ||
+ I->getFlag(MachineInstr::FrameDestroy);
+ }
+}
+
+// Convenience function to determine whether I is part of the SVE callee saves.
+static bool isPartOfSVECalleeSaves(MachineBasicBlock::iterator I) {
+ return isPartOfZPRCalleeSaves(I) || isPartOfPPRCalleeSaves(I);
+}
+
AArch64PrologueEpilogueCommon::AArch64PrologueEpilogueCommon(
MachineFunction &MF, MachineBasicBlock &MBB,
const AArch64FrameLowering &AFL)
@@ -316,7 +331,7 @@ bool AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump(
// When there is an SVE area on the stack, always allocate the
// callee-saves and spills/locals separately.
- if (AFL.getSVEStackSize(MF))
+ if (AFI->hasSVEStackSize())
return false;
return true;
@@ -639,7 +654,7 @@ void AArch64PrologueEmitter::emitPrologue() {
// Now allocate space for the GPR callee saves.
MachineBasicBlock::iterator MBBI = PrologueBeginI;
- while (MBBI != EndI && isSVECalleeSave(MBBI))
+ while (MBBI != EndI && isPartOfSVECalleeSaves(MBBI))
++MBBI;
FirstGPRSaveI = convertCalleeSaveRestoreToSPPrePostIncDec(
MBBI, DL, -AFI->getCalleeSavedStackSize(), EmitAsyncCFI);
@@ -669,7 +684,7 @@ void AArch64PrologueEmitter::emitPrologue() {
MachineBasicBlock::iterator AfterGPRSavesI = FirstGPRSaveI;
while (AfterGPRSavesI != EndI &&
AfterGPRSavesI->getFlag(MachineInstr::FrameSetup) &&
- !isSVECalleeSave(AfterGPRSavesI)) {
+ !isPartOfSVECalleeSaves(AfterGPRSavesI)) {
if (CombineSPBump &&
// Only fix-up frame-setup load/store instructions.
(!AFL.requiresSaveVG(MF) || !isVGInstruction(AfterGPRSavesI, TLI)))
@@ -700,56 +715,105 @@ void AArch64PrologueEmitter::emitPrologue() {
if (AFL.windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding))
emitWindowsStackProbe(AfterGPRSavesI, DL, NumBytes, RealignmentPadding);
- StackOffset SVEStackSize = AFL.getSVEStackSize(MF);
- StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize;
- MachineBasicBlock::iterator CalleeSavesEnd = AfterGPRSavesI;
+ StackOffset PPRCalleeSavesSize =
+ StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
+ StackOffset ZPRCalleeSavesSize =
+ StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
+ StackOffset SVECalleeSavesSize = PPRCalleeSavesSize + ZPRCalleeSavesSize;
+ StackOffset PPRLocalsSize = AFL.getPPRStackSize(MF) - PPRCalleeSavesSize;
+ StackOffset ZPRLocalsSize = AFL.getZPRStackSize(MF) - ZPRCalleeSavesSize;
+
+ std::optional<MachineBasicBlock::iterator> ZPRCalleeSavesBegin,
+ ZPRCalleeSavesEnd, PPRCalleeSavesBegin, PPRCalleeSavesEnd;
StackOffset CFAOffset =
StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes);
-
- // Process the SVE callee-saves to determine what space needs to be
- // allocated.
MachineBasicBlock::iterator AfterSVESavesI = AfterGPRSavesI;
- if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
- LLVM_DEBUG(dbgs() << "SVECalleeSavedStackSize = " << CalleeSavedSize
- << "\n");
- SVECalleeSavesSize = StackOffset::getScalable(CalleeSavedSize);
- SVELocalsSize = SVEStackSize - SVECalleeSavesSize;
- // Find callee save instructions in frame.
- // Note: With FPAfterSVECalleeSaves the callee saves have already been
- // allocated.
- if (!FPAfterSVECalleeSaves) {
- MachineBasicBlock::iterator CalleeSavesBegin = AfterGPRSavesI;
- assert(isSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction");
- while (isSVECalleeSave(AfterSVESavesI) &&
+ if (!FPAfterSVECalleeSaves) {
+ // Process the SVE callee-saves to find the starts/ends of the ZPR and PPR
+ // areas.
+ PPRCalleeSavesBegin = AfterGPRSavesI;
+ if (PPRCalleeSavesSize) {
+ LLVM_DEBUG(dbgs() << "PPRCalleeSavedStackSize = "
+ << PPRCalleeSavesSize.getScalable() << "\n");
+
+ assert(isPartOfPPRCalleeSaves(*PPRCalleeSavesBegin) &&
+ "Unexpected instruction");
+ while (isPartOfPPRCalleeSaves(AfterSVESavesI) &&
+ AfterSVESavesI != MBB.getFirstTerminator())
+ ++AfterSVESavesI;
+ }
+ PPRCalleeSavesEnd = ZPRCalleeSavesBegin = AfterSVESavesI;
+ if (ZPRCalleeSavesSize) {
+ LLVM_DEBUG(dbgs() << "ZPRCalleeSavedStackSize = "
+ << ZPRCalleeSavesSize.getScalable() << "\n");
+ assert(isPartOfZPRCalleeSaves(*ZPRCalleeSavesBegin) &&
+ "Unexpected instruction");
+ while (isPartOfZPRCalleeSaves(AfterSVESavesI) &&
AfterSVESavesI != MBB.getFirstTerminator())
++AfterSVESavesI;
- CalleeSavesEnd = AfterSVESavesI;
-
- StackOffset LocalsSize = SVELocalsSize + StackOffset::getFixed(NumBytes);
- // Allocate space for the callee saves (if any).
- allocateStackSpace(CalleeSavesBegin, 0, SVECalleeSavesSize,
- EmitAsyncCFI && !HasFP, CFAOffset,
- MFI.hasVarSizedObjects() || LocalsSize);
}
+ ZPRCalleeSavesEnd = AfterSVESavesI;
}
- CFAOffset += SVECalleeSavesSize;
if (EmitAsyncCFI)
- emitCalleeSavedSVELocations(CalleeSavesEnd);
-
- // Allocate space for the rest of the frame including SVE locals. Align the
- // stack as necessary.
- assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) &&
- "Cannot use redzone with stack realignment");
- if (!AFL.canUseRedZone(MF)) {
- // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
- // the correct value here, as NumBytes also includes padding bytes,
- // which shouldn't be counted here.
- allocateStackSpace(CalleeSavesEnd, RealignmentPadding,
- SVELocalsSize + StackOffset::getFixed(NumBytes),
+ emitCalleeSavedSVELocations(AfterSVESavesI);
+
+ if (AFI->hasSplitSVEObjects()) {
+ assert(!FPAfterSVECalleeSaves &&
+ "Cannot use FPAfterSVECalleeSaves with aarch64-split-sve-objects");
+ assert(!AFL.canUseRedZone(MF) &&
+ "Cannot use redzone with aarch64-split-sve-objects");
+ // TODO: Handle HasWinCFI/NeedsWinCFI?
+ assert(!NeedsWinCFI &&
+ "WinCFI with aarch64-split-sve-objects is not supported");
+
+ // Split ZPR and PPR allocation.
+ // Allocate PPR callee saves
+ allocateStackSpace(*PPRCalleeSavesBegin, 0, PPRCalleeSavesSize,
+ EmitAsyncCFI && !HasFP, CFAOffset,
+ MFI.hasVarSizedObjects() || ZPRCalleeSavesSize ||
+ ZPRLocalsSize || PPRLocalsSize);
+ CFAOffset += PPRCalleeSavesSize;
+
+ // Allocate PPR locals + ZPR callee saves
+ assert(PPRCalleeSavesEnd == ZPRCalleeSavesBegin &&
+ "Expected ZPR callee saves after PPR locals");
+ allocateStackSpace(*PPRCalleeSavesEnd, RealignmentPadding,
+ PPRLocalsSize + ZPRCalleeSavesSize,
+ EmitAsyncCFI && !HasFP, CFAOffset,
+ MFI.hasVarSizedObjects() || ZPRLocalsSize);
+ CFAOffset += PPRLocalsSize + ZPRCalleeSavesSize;
+
+ // Allocate ZPR locals
+ allocateStackSpace(*ZPRCalleeSavesEnd, RealignmentPadding,
+ ZPRLocalsSize + StackOffset::getFixed(NumBytes),
EmitAsyncCFI && !HasFP, CFAOffset,
MFI.hasVarSizedObjects());
+ } else {
+ // Allocate space for the callee saves (if any).
+ StackOffset LocalsSize =
+ PPRLocalsSize + ZPRLocalsSize + StackOffset::getFixed(NumBytes);
+ if (!FPAfterSVECalleeSaves)
+ allocateStackSpace(AfterGPRSavesI, 0, SVECalleeSavesSize,
+ EmitAsyncCFI && !HasFP, CFAOffset,
+ MFI.hasVarSizedObjects() || LocalsSize);
+ CFAOffset += SVECalleeSavesSize;
+
+ // Allocate space for the rest of the frame including SVE locals. Align the
+ // stack as necessary.
+ assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) &&
+ "Cannot use redzone with stack realignment");
+ if (!AFL.canUseRedZone(MF)) {
+ // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
+ // the correct value here, as NumBytes also includes padding bytes,
+ // which shouldn't be counted here.
+ StackOffset SVELocalsSize = PPRLocalsSize + ZPRLocalsSize;
+ allocateStackSpace(AfterSVESavesI, RealignmentPadding,
+ SVELocalsSize + StackOffset::getFixed(NumBytes),
+ EmitAsyncCFI && !HasFP, CFAOffset,
+ MFI.hasVarSizedObjects());
+ }
}
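Taken together, the three allocateStackSpace calls give the split-SVE frame this shape, from higher to lower addresses (a summary of the code above, with the hazard padding accounted inside the PPR locals area as described earlier):

  // <- SP after the GPR callee-save allocation
  //    PPR callee saves              <- first SP decrement
  //    PPR locals + hazard padding   \
  //    ZPR callee saves              <- second SP decrement
  //    ZPR locals + NumBytes         <- third decrement (realignment here)
  // <- SP after the prologue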
// If we need a base pointer, set it up here. It's whatever the value of the
@@ -796,7 +860,8 @@ void AArch64PrologueEmitter::emitPrologue() {
emitDefineCFAWithFP(AfterSVESavesI, FixedObject);
} else {
StackOffset TotalSize =
- SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize());
+ AFL.getSVEStackSize(MF) +
+ StackOffset::getFixed((int64_t)MFI.getStackSize());
CFIInstBuilder CFIBuilder(MBB, AfterSVESavesI, MachineInstr::FrameSetup);
CFIBuilder.insertCFIInst(
createDefCFA(RegInfo, /*FrameReg=*/AArch64::SP, /*Reg=*/AArch64::SP,
@@ -1165,7 +1230,7 @@ void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
for (const auto &Info : CSI) {
unsigned FrameIdx = Info.getFrameIdx();
- if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
+ if (MFI.hasScalableStackID(FrameIdx))
continue;
assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
@@ -1191,8 +1256,10 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
AFL.getOffsetOfLocalArea();
}
+ StackOffset PPRStackSize = AFL.getPPRStackSize(MF);
for (const auto &Info : CSI) {
- if (MFI.getStackID(Info.getFrameIdx()) != TargetStackID::ScalableVector)
+ int FI = Info.getFrameIdx();
+ if (!MFI.hasScalableStackID(FI))
continue;
// Not all unwinders may know about SVE registers, so assume the lowest
@@ -1203,9 +1270,13 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
continue;
StackOffset Offset =
- StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) -
+ StackOffset::getScalable(MFI.getObjectOffset(FI)) -
StackOffset::getFixed(AFI->getCalleeSavedStackSize(MFI));
+ if (AFI->hasSplitSVEObjects() &&
+ MFI.getStackID(FI) == TargetStackID::ScalableVector)
+ Offset -= PPRStackSize;
+
CFIBuilder.insertCFIInst(
createCFAOffset(RegInfo, Reg, Offset, IncomingVGOffsetFromDefCFA));
}
@@ -1322,7 +1393,7 @@ void AArch64EpilogueEmitter::emitEpilogue() {
while (FirstGPRRestoreI != Begin) {
--FirstGPRRestoreI;
if (!FirstGPRRestoreI->getFlag(MachineInstr::FrameDestroy) ||
- (!FPAfterSVECalleeSaves && isSVECalleeSave(FirstGPRRestoreI))) {
+ (!FPAfterSVECalleeSaves && isPartOfSVECalleeSaves(FirstGPRRestoreI))) {
++FirstGPRRestoreI;
break;
} else if (CombineSPBump)
@@ -1346,7 +1417,9 @@ void AArch64EpilogueEmitter::emitEpilogue() {
if (HasFP && AFI->hasSwiftAsyncContext())
emitSwiftAsyncContextFramePointer(EpilogueEndI, DL);
- const StackOffset &SVEStackSize = AFL.getSVEStackSize(MF);
+ StackOffset ZPRStackSize = AFL.getZPRStackSize(MF);
+ StackOffset PPRStackSize = AFL.getPPRStackSize(MF);
+ StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
// If there is a single SP update, insert it before the ret and we're done.
if (CombineSPBump) {
@@ -1367,106 +1440,188 @@ void AArch64EpilogueEmitter::emitEpilogue() {
NumBytes -= PrologueSaveSize;
assert(NumBytes >= 0 && "Negative stack allocation size!?");
- // Process the SVE callee-saves to determine what space needs to be
- // deallocated.
- StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
- MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI,
- RestoreEnd = FirstGPRRestoreI;
- if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
- if (FPAfterSVECalleeSaves)
- RestoreEnd = MBB.getFirstTerminator();
-
- RestoreBegin = std::prev(RestoreEnd);
- while (RestoreBegin != MBB.begin() &&
- isSVECalleeSave(std::prev(RestoreBegin)))
- --RestoreBegin;
-
- assert(isSVECalleeSave(RestoreBegin) &&
- isSVECalleeSave(std::prev(RestoreEnd)) && "Unexpected instruction");
-
- StackOffset CalleeSavedSizeAsOffset =
- StackOffset::getScalable(CalleeSavedSize);
- DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
- DeallocateAfter = CalleeSavedSizeAsOffset;
- }
-
- // Deallocate the SVE area.
- if (FPAfterSVECalleeSaves) {
- // If the callee-save area is before FP, restoring the FP implicitly
- // deallocates non-callee-save SVE allocations. Otherwise, deallocate
- // them explicitly.
- if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
- emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
- DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
- NeedsWinCFI, &HasWinCFI);
+ if (!AFI->hasSplitSVEObjects()) {
+ // Process the SVE callee-saves to determine what space needs to be
+ // deallocated.
+ StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
+ MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI,
+ RestoreEnd = FirstGPRRestoreI;
+ int64_t ZPRCalleeSavedSize = AFI->getZPRCalleeSavedStackSize();
+ int64_t PPRCalleeSavedSize = AFI->getPPRCalleeSavedStackSize();
+ int64_t SVECalleeSavedSize = ZPRCalleeSavedSize + PPRCalleeSavedSize;
+
+ if (SVECalleeSavedSize) {
+ if (FPAfterSVECalleeSaves)
+ RestoreEnd = MBB.getFirstTerminator();
+
+ RestoreBegin = std::prev(RestoreEnd);
+ while (RestoreBegin != MBB.begin() &&
+ isPartOfSVECalleeSaves(std::prev(RestoreBegin)))
+ --RestoreBegin;
+
+ assert(isPartOfSVECalleeSaves(RestoreBegin) &&
+ isPartOfSVECalleeSaves(std::prev(RestoreEnd)) &&
+ "Unexpected instruction");
+
+ StackOffset CalleeSavedSizeAsOffset =
+ StackOffset::getScalable(SVECalleeSavedSize);
+ DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
+ DeallocateAfter = CalleeSavedSizeAsOffset;
}
- // Deallocate callee-save non-SVE registers.
- emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
- StackOffset::getFixed(AFI->getCalleeSavedStackSize()), TII,
- MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
-
- // Deallocate fixed objects.
- emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
- StackOffset::getFixed(FixedObject), TII,
- MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
-
- // Deallocate callee-save SVE registers.
- emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
- DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
- NeedsWinCFI, &HasWinCFI);
- } else if (SVEStackSize) {
- int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize();
- // If we have stack realignment or variable-sized objects we must use the
- // FP to restore SVE callee saves (as there is an unknown amount of
- // data/padding between the SP and SVE CS area).
- Register BaseForSVEDealloc =
- (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
- : AArch64::SP;
- if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) {
- Register CalleeSaveBase = AArch64::FP;
- if (int64_t CalleeSaveBaseOffset =
- AFI->getCalleeSaveBaseToFrameRecordOffset()) {
- // If we have have an non-zero offset to the non-SVE CS base we need to
- // compute the base address by subtracting the offest in a temporary
- // register first (to avoid briefly deallocating the SVE CS).
- CalleeSaveBase =
- MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
- emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP,
- StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
- MachineInstr::FrameDestroy);
- }
- // The code below will deallocate the stack space space by moving the
- // SP to the start of the SVE callee-save area.
- emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase,
- StackOffset::getScalable(-SVECalleeSavedSize), TII,
- MachineInstr::FrameDestroy);
- } else if (BaseForSVEDealloc == AArch64::SP) {
- if (SVECalleeSavedSize) {
- // Deallocate the non-SVE locals first before we can deallocate (and
- // restore callee saves) from the SVE area.
- emitFrameOffset(
- MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
- StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy,
- false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
- SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize));
- NumBytes = 0;
+ // Deallocate the SVE area.
+ if (FPAfterSVECalleeSaves) {
+ // If the callee-save area is before FP, restoring the FP implicitly
+ // deallocates non-callee-save SVE allocations. Otherwise, deallocate
+ // them explicitly.
+ if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
+ emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
+ DeallocateBefore, TII, MachineInstr::FrameDestroy,
+ false, NeedsWinCFI, &HasWinCFI);
}
+ // Deallocate callee-save non-SVE registers.
emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
- DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
- NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
- SVEStackSize +
- StackOffset::getFixed(NumBytes + PrologueSaveSize));
+ StackOffset::getFixed(AFI->getCalleeSavedStackSize()),
+ TII, MachineInstr::FrameDestroy, false, NeedsWinCFI,
+ &HasWinCFI);
+
+ // Deallocate fixed objects.
+ emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
+ StackOffset::getFixed(FixedObject), TII,
+ MachineInstr::FrameDestroy, false, NeedsWinCFI,
+ &HasWinCFI);
+ // Deallocate callee-save SVE registers.
emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
- NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
- DeallocateAfter +
- StackOffset::getFixed(NumBytes + PrologueSaveSize));
+ NeedsWinCFI, &HasWinCFI);
+ } else if (SVEStackSize) {
+ int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize();
+ // If we have stack realignment or variable-sized objects we must use the
+ // FP to restore SVE callee saves (as there is an unknown amount of
+ // data/padding between the SP and SVE CS area).
+ Register BaseForSVEDealloc =
+ (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
+ : AArch64::SP;
+ if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) {
+ Register CalleeSaveBase = AArch64::FP;
+ if (int64_t CalleeSaveBaseOffset =
+ AFI->getCalleeSaveBaseToFrameRecordOffset()) {
+          // If we have a non-zero offset to the non-SVE CS base, we need
+          // to compute the base address by subtracting the offset in a
+          // temporary register first (to avoid briefly deallocating the SVE
+          // CS).
+ CalleeSaveBase = MBB.getParent()->getRegInfo().createVirtualRegister(
+ &AArch64::GPR64RegClass);
+ emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP,
+ StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
+ MachineInstr::FrameDestroy);
+ }
+        // The code below will deallocate the stack space by moving the
+ // SP to the start of the SVE callee-save area.
+ emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase,
+ StackOffset::getScalable(-SVECalleeSavedSize), TII,
+ MachineInstr::FrameDestroy);
+ } else if (BaseForSVEDealloc == AArch64::SP) {
+ if (SVECalleeSavedSize) {
+          // Deallocate the non-SVE locals first, before we can deallocate (and
+ // restore callee saves) from the SVE area.
+ emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
+ StackOffset::getFixed(NumBytes), TII,
+ MachineInstr::FrameDestroy, false, NeedsWinCFI,
+ &HasWinCFI, EmitCFI && !HasFP,
+ SVEStackSize + StackOffset::getFixed(
+ NumBytes + PrologueSaveSize));
+ NumBytes = 0;
+ }
+
+ emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
+ DeallocateBefore, TII, MachineInstr::FrameDestroy,
+ false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
+ SVEStackSize +
+ StackOffset::getFixed(NumBytes + PrologueSaveSize));
+
+ emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
+ DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
+ NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
+ DeallocateAfter +
+ StackOffset::getFixed(NumBytes + PrologueSaveSize));
+ }
+
+ if (EmitCFI)
+ emitCalleeSavedSVERestores(RestoreEnd);
+ }
+ } else if (AFI->hasSplitSVEObjects() && SVEStackSize) {
+    // TODO: Support stack realignment and variable-sized objects.
+ assert(!AFI->isStackRealigned() && !MFI.hasVarSizedObjects() &&
+ "unexpected stack realignment or variable sized objects with split "
+ "SVE stack objects");
+    // With SplitSVEObjects, determine the sizes and starts/ends of the ZPR
+    // and PPR areas.
+ auto ZPRCalleeSavedSize =
+ StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
+ auto PPRCalleeSavedSize =
+ StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
+ StackOffset PPRLocalsSize = PPRStackSize - PPRCalleeSavedSize;
+ StackOffset ZPRLocalsSize = ZPRStackSize - ZPRCalleeSavedSize;
+
+ MachineBasicBlock::iterator PPRRestoreBegin = FirstGPRRestoreI,
+ PPRRestoreEnd = FirstGPRRestoreI;
+ if (PPRCalleeSavedSize) {
+ PPRRestoreBegin = std::prev(PPRRestoreEnd);
+ while (PPRRestoreBegin != MBB.begin() &&
+ isPartOfPPRCalleeSaves(std::prev(PPRRestoreBegin)))
+ --PPRRestoreBegin;
+ }
+
+ MachineBasicBlock::iterator ZPRRestoreBegin = PPRRestoreBegin,
+ ZPRRestoreEnd = PPRRestoreBegin;
+ if (ZPRCalleeSavedSize) {
+ ZPRRestoreBegin = std::prev(ZPRRestoreEnd);
+ while (ZPRRestoreBegin != MBB.begin() &&
+ isPartOfZPRCalleeSaves(std::prev(ZPRRestoreBegin)))
+ --ZPRRestoreBegin;
}
+
+ auto CFAOffset =
+ SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize);
+ if (PPRCalleeSavedSize || ZPRCalleeSavedSize) {
+      // Deallocate the non-SVE locals first, before we can deallocate (and
+ // restore callee saves) from the SVE area.
+ auto NonSVELocals = StackOffset::getFixed(NumBytes);
+ emitFrameOffset(MBB, ZPRRestoreBegin, DL, AArch64::SP, AArch64::SP,
+ NonSVELocals, TII, MachineInstr::FrameDestroy, false,
+ false, nullptr, EmitCFI && !HasFP, CFAOffset);
+ NumBytes = 0;
+ CFAOffset -= NonSVELocals;
+ }
+
+ if (ZPRLocalsSize) {
+ emitFrameOffset(MBB, ZPRRestoreBegin, DL, AArch64::SP, AArch64::SP,
+ ZPRLocalsSize, TII, MachineInstr::FrameDestroy, false,
+ false, nullptr, EmitCFI && !HasFP, CFAOffset);
+ CFAOffset -= ZPRLocalsSize;
+ }
+
+ if (PPRLocalsSize || ZPRCalleeSavedSize) {
+ assert(PPRRestoreBegin == ZPRRestoreEnd &&
+ "Expected PPR restores after ZPR");
+ emitFrameOffset(MBB, PPRRestoreBegin, DL, AArch64::SP, AArch64::SP,
+ PPRLocalsSize + ZPRCalleeSavedSize, TII,
+ MachineInstr::FrameDestroy, false, false, nullptr,
+ EmitCFI && !HasFP, CFAOffset);
+ CFAOffset -= PPRLocalsSize + ZPRCalleeSavedSize;
+ }
+ if (PPRCalleeSavedSize) {
+ emitFrameOffset(MBB, PPRRestoreEnd, DL, AArch64::SP, AArch64::SP,
+ PPRCalleeSavedSize, TII, MachineInstr::FrameDestroy,
+ false, false, nullptr, EmitCFI && !HasFP, CFAOffset);
+ }
+
+    // We only emit CFI information for ZPRs, so emit CFI after the ZPR restores.
if (EmitCFI)
- emitCalleeSavedSVERestores(RestoreEnd);
+ emitCalleeSavedSVERestores(ZPRRestoreEnd);
}
if (!HasFP) {
@@ -1624,8 +1779,7 @@ void AArch64EpilogueEmitter::emitCalleeSavedRestores(
CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
for (const auto &Info : CSI) {
- if (SVE !=
- (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
+ if (SVE != MFI.hasScalableStackID(Info.getFrameIdx()))
continue;
MCRegister Reg = Info.getReg();
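Each deallocation step in the split-SVE epilogue above shrinks CFAOffset by exactly the amount just popped, so the CFA remains described correctly between SP adjustments. The bookkeeping in miniature (plain integers standing in for StackOffset; the concrete sizes are made up):

#include <cstdio>

int main() {
  int CFAOffset = 64 + 32 + 16; // SVE area + NumBytes + PrologueSaveSize
  int Steps[] = {16 /*non-SVE locals*/, 32 /*ZPR locals*/,
                 8 /*PPR locals + ZPR CS*/, 8 /*PPR CS*/};
  for (int Popped : Steps) {
    std::printf("sp += %d (CFA is now sp + %d)\n", Popped, CFAOffset - Popped);
    CFAOffset -= Popped;
  }
  return CFAOffset == 48 ? 0 : 1; // what remains is the fixed-object area
}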
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 2b0c8ad..79975b0 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -71,6 +71,7 @@ bool AArch64RegisterInfo::regNeedsCFI(MCRegister Reg,
const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
+ auto &AFI = *MF->getInfo<AArch64FunctionInfo>();
if (MF->getFunction().getCallingConv() == CallingConv::GHC)
// GHC set of callee saved regs is empty as all those regs are
@@ -101,10 +102,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
return CSR_Win_AArch64_AAVPCS_SaveList;
- if (MF->getFunction().getCallingConv() ==
- CallingConv::AArch64_SVE_VectorCall)
- return CSR_Win_AArch64_SVE_AAPCS_SaveList;
- if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
+ if (AFI.hasSVE_AAPCS(*MF))
return CSR_Win_AArch64_SVE_AAPCS_SaveList;
return CSR_Win_AArch64_AAPCS_SaveList;
}
@@ -148,7 +146,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
// This is for OSes other than Windows; Windows is a separate case further
// above.
return CSR_AArch64_AAPCS_X18_SaveList;
- if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
+ if (AFI.hasSVE_AAPCS(*MF))
return CSR_AArch64_SVE_AAPCS_SaveList;
return CSR_AArch64_AAPCS_SaveList;
}
@@ -158,6 +156,7 @@ AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
"Invalid subtarget for getDarwinCalleeSavedRegs");
+ auto &AFI = *MF->getInfo<AArch64FunctionInfo>();
if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
report_fatal_error(
@@ -205,7 +204,7 @@ AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
if (MF->getFunction().getCallingConv() == CallingConv::Win64)
return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
- if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
+ if (AFI.hasSVE_AAPCS(*MF))
return CSR_Darwin_AArch64_SVE_AAPCS_SaveList;
return CSR_Darwin_AArch64_AAPCS_SaveList;
}
@@ -643,7 +642,7 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
if (ST.hasSVE() || ST.isStreaming()) {
    // Frames that have variable-sized objects and scalable SVE objects
    // should always use a base pointer.
- if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
+ if (!AFI->hasCalculatedStackSizeSVE() || AFI->hasSVEStackSize())
return true;
}
@@ -783,7 +782,7 @@ AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
AFI->hasCalculatedStackSizeSVE()) &&
"Expected SVE area to be calculated by this point");
- return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE() &&
+ return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->hasSVEStackSize() &&
!AFI->hasStackHazardSlotIndex();
}
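
Each of the three getCalleeSavedRegs variants now asks AArch64FunctionInfo a single question instead of re-checking the calling convention and isSVECC() separately. A sketch of hasSVE_AAPCS, inferred from the two conditions it replaces in the Windows path (an assumption, not shown in this diff):

    // Hedged reconstruction: true when the function was lowered as SVE
    // calling-convention code or explicitly uses the SVE vector-call CC.
    bool hasSVE_AAPCS(const MachineFunction &MF) const {
      return isSVECC() || MF.getFunction().getCallingConv() ==
                              CallingConv::AArch64_SVE_VectorCall;
    }
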
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 1e30735..36c9cb6 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -707,16 +707,14 @@ let Predicates = [HasSVE_or_SME] in {
defm SDOT_ZZZ : sve_intx_dot<0b0, "sdot", AArch64sdot>;
defm UDOT_ZZZ : sve_intx_dot<0b1, "udot", AArch64udot>;
- let Predicates = [HasSVE_or_SME] in {
- def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)),
- (UDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>;
- def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)),
- (SDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>;
- def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
- (UDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>;
- def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
- (SDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>;
- } // End HasSVE_or_SME
+ def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)),
+ (UDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>;
+ def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)),
+ (SDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>;
+ def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
+ (UDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>;
+ def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
+ (SDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>;
defm SDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b0, "sdot", int_aarch64_sve_sdot_lane>;
defm UDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b1, "udot", int_aarch64_sve_udot_lane>;
@@ -3646,6 +3644,9 @@ let Predicates = [HasSVE_or_SME, HasMatMulInt8] in {
defm USDOT_ZZZ : sve_int_dot_mixed<"usdot", AArch64usdot>;
defm USDOT_ZZZI : sve_int_dot_mixed_indexed<0, "usdot", int_aarch64_sve_usdot_lane>;
defm SUDOT_ZZZI : sve_int_dot_mixed_indexed<1, "sudot", int_aarch64_sve_sudot_lane>;
+
+ def : Pat<(nxv4i32 (partial_reduce_sumla nxv4i32:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)),
+ (USDOT_ZZZ $Acc, $RHS, $LHS)>;
} // End HasSVE_or_SME, HasMatMulInt8
let Predicates = [HasSVE, HasMatMulFP32] in {
@@ -3752,6 +3753,19 @@ let Predicates = [HasSVE2_or_SME] in {
defm UMLSLB_ZZZ : sve2_int_mla_long<0b10110, "umlslb", int_aarch64_sve_umlslb>;
defm UMLSLT_ZZZ : sve2_int_mla_long<0b10111, "umlslt", int_aarch64_sve_umlslt>;
+ def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)),
+ (UMLALT_ZZZ_D (UMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>;
+ def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)),
+ (SMLALT_ZZZ_D (SMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>;
+ def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)),
+ (UMLALT_ZZZ_S (UMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>;
+ def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)),
+ (SMLALT_ZZZ_S (SMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>;
+ def : Pat<(nxv8i16 (partial_reduce_umla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)),
+ (UMLALT_ZZZ_H (UMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>;
+ def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)),
+ (SMLALT_ZZZ_H (SMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>;
+
// SVE2 saturating multiply-add long (indexed)
defm SQDMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0100, "sqdmlalb", int_aarch64_sve_sqdmlalb_lane>;
defm SQDMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0101, "sqdmlalt", int_aarch64_sve_sqdmlalt_lane>;
@@ -3880,19 +3894,6 @@ let Predicates = [HasSVE2_or_SME] in {
def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$Input, (nxv16i8 (splat_vector (i32 1))))),
(SADDWT_ZZZ_H (SADDWB_ZZZ_H $Acc, $Input), $Input)>;
- def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)),
- (UMLALT_ZZZ_D (UMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>;
- def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)),
- (SMLALT_ZZZ_D (SMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>;
- def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)),
- (UMLALT_ZZZ_S (UMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>;
- def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)),
- (SMLALT_ZZZ_S (SMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>;
- def : Pat<(nxv8i16 (partial_reduce_umla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)),
- (UMLALT_ZZZ_H (UMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>;
- def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)),
- (SMLALT_ZZZ_H (SMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>;
-
// SVE2 integer multiply long
defm SQDMULLB_ZZZ : sve2_wide_int_arith_long<0b11000, "sqdmullb", int_aarch64_sve_sqdmullb>;
defm SQDMULLT_ZZZ : sve2_wide_int_arith_long<0b11001, "sqdmullt", int_aarch64_sve_sqdmullt>;
@@ -4200,11 +4201,6 @@ let Predicates = [HasSVEAES2, HasNonStreamingSVE_or_SSVE_AES] in {
def PMULL_2ZZZ_Q : sve_crypto_pmull_multi<"pmull">;
}
-let Predicates = [HasSVE_or_SME, HasMatMulInt8] in {
- def : Pat<(nxv4i32 (partial_reduce_sumla nxv4i32:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)),
- (USDOT_ZZZ $Acc, $RHS, $LHS)>;
- } // End HasSVE_or_SME, HasMatMulInt8
-
//===----------------------------------------------------------------------===//
// SME or SVE2.1 instructions
//===----------------------------------------------------------------------===//
@@ -4238,12 +4234,10 @@ defm UDOT_ZZZ_HtoS : sve2p1_two_way_dot_vv<"udot", 0b1, int_aarch64_sve_udot_x2
defm SDOT_ZZZI_HtoS : sve2p1_two_way_dot_vvi<"sdot", 0b0, int_aarch64_sve_sdot_lane_x2>;
defm UDOT_ZZZI_HtoS : sve2p1_two_way_dot_vvi<"udot", 0b1, int_aarch64_sve_udot_lane_x2>;
-let Predicates = [HasSVE2p1_or_SME2] in {
- def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
- (UDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>;
- def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
- (SDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>;
-} // End HasSVE2p1_or_SME2
+def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
+ (UDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>;
+def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
+ (SDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>;
defm SQCVTN_Z2Z_StoH : sve2p1_multi_vec_extract_narrow<"sqcvtn", 0b00, int_aarch64_sve_sqcvtn_x2>;
defm UQCVTN_Z2Z_StoH : sve2p1_multi_vec_extract_narrow<"uqcvtn", 0b01, int_aarch64_sve_uqcvtn_x2>;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 7ee54c5..c197550e 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -438,7 +438,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FPOW, G_FLOG, G_FLOG2,
G_FLOG10, G_FTAN, G_FEXP, G_FEXP2, G_FEXP10,
G_FACOS, G_FASIN, G_FATAN, G_FATAN2, G_FCOSH,
- G_FSINH, G_FTANH})
+ G_FSINH, G_FTANH, G_FMODF})
// We need a call for these, so we always need to scalarize.
.scalarize(0)
// Regardless of FP16 support, widen 16-bit elements to 32-bits.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index f01d5f6..6efa78e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -608,6 +608,8 @@ public:
? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
: EmptySet;
+ const size_t HybridModuleRootKernelsSize = HybridModuleRootKernels.size();
+
for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
// Each iteration of this loop assigns exactly one global variable to
// exactly one of the implementation strategies.
@@ -647,7 +649,8 @@ public:
ModuleScopeVariables.insert(GV);
} else if (K.second.size() == 1) {
KernelAccessVariables.insert(GV);
- } else if (set_is_subset(K.second, HybridModuleRootKernels)) {
+ } else if (K.second.size() == HybridModuleRootKernelsSize &&
+ set_is_subset(K.second, HybridModuleRootKernels)) {
ModuleScopeVariables.insert(GV);
} else {
TableLookupVariables.insert(GV);
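
Hoisting the size out of the loop is more than a micro-optimization: pairing the subset test with an equal-size test turns it into a set-equality test, so a variable now gets module scope here only when its kernel set matches the hybrid module root's kernels exactly. A standalone illustration of the identity (generic types, not the pass's own):

    #include <algorithm>
    #include <set>

    // If A is a subset of B and |A| == |B|, then A == B; the cheap size
    // compare both filters early and tightens subset to equality.
    bool setsEqual(const std::set<int> &A, const std::set<int> &B) {
      return A.size() == B.size() &&
             std::includes(B.begin(), B.end(), A.begin(), A.end());
    }
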
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 92a587b..280fbe2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1384,6 +1384,11 @@ void AMDGPUPassConfig::addCodeGenPrepare() {
if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
addPass(createAMDGPULowerKernelArgumentsPass());
+ TargetPassConfig::addCodeGenPrepare();
+
+ if (isPassEnabled(EnableLoadStoreVectorizer))
+ addPass(createLoadStoreVectorizerPass());
+
if (TM->getTargetTriple().isAMDGCN()) {
// This lowering has been placed after codegenprepare to take advantage of
// address mode matching (which is why it isn't put with the LDS lowerings).
@@ -1392,15 +1397,6 @@ void AMDGPUPassConfig::addCodeGenPrepare() {
// but has been put before switch lowering and CFG flattening so that those
// passes can run on the more optimized control flow this pass creates in
// many cases.
- //
- // FIXME: This should ideally be put after the LoadStoreVectorizer.
- // However, due to some annoying facts about ResourceUsageAnalysis,
- // (especially as exercised in the resource-usage-dead-function test),
- // we need all the function passes codegenprepare all the way through
- // said resource usage analysis to run on the call graph produced
- // before codegenprepare runs (because codegenprepare will knock some
- // nodes out of the graph, which leads to function-level passes not
- // being run on them, which causes crashes in the resource usage analysis).
addPass(createAMDGPULowerBufferFatPointersPass());
addPass(createAMDGPULowerIntrinsicsLegacyPass());
// In accordance with the above FIXME, manually force all the
@@ -1408,11 +1404,6 @@ void AMDGPUPassConfig::addCodeGenPrepare() {
addPass(new DummyCGSCCPass());
}
- TargetPassConfig::addCodeGenPrepare();
-
- if (isPassEnabled(EnableLoadStoreVectorizer))
- addPass(createLoadStoreVectorizerPass());
-
// LowerSwitch pass may introduce unreachable blocks that can
// cause unexpected behavior for subsequent passes. Placing it
// here seems better, as these blocks would get cleaned up by
@@ -2125,6 +2116,11 @@ void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
if (EnableLowerKernelArguments)
addPass(AMDGPULowerKernelArgumentsPass(TM));
+ Base::addCodeGenPrepare(addPass);
+
+ if (isPassEnabled(EnableLoadStoreVectorizer))
+ addPass(LoadStoreVectorizerPass());
+
// This lowering has been placed after codegenprepare to take advantage of
// address mode matching (which is why it isn't put with the LDS lowerings).
// It could be placed anywhere before uniformity annotations (an analysis
@@ -2132,25 +2128,11 @@ void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
// but has been put before switch lowering and CFG flattening so that those
// passes can run on the more optimized control flow this pass creates in
// many cases.
- //
- // FIXME: This should ideally be put after the LoadStoreVectorizer.
- // However, due to some annoying facts about ResourceUsageAnalysis,
- // (especially as exercised in the resource-usage-dead-function test),
- // we need all the function passes codegenprepare all the way through
- // said resource usage analysis to run on the call graph produced
- // before codegenprepare runs (because codegenprepare will knock some
- // nodes out of the graph, which leads to function-level passes not
- // being run on them, which causes crashes in the resource usage analysis).
addPass(AMDGPULowerBufferFatPointersPass(TM));
addPass.requireCGSCCOrder();
addPass(AMDGPULowerIntrinsicsPass(TM));
- Base::addCodeGenPrepare(addPass);
-
- if (isPassEnabled(EnableLoadStoreVectorizer))
- addPass(LoadStoreVectorizerPass());
-
// LowerSwitch pass may introduce unreachable blocks that can cause unexpected
// behavior for subsequent passes. Placing it here seems better that these
// blocks would get cleaned up by UnreachableBlockElim inserted next in the
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index fab78a9..bdc0810 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -29,6 +29,7 @@
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/ErrorHandling.h"
@@ -1633,64 +1634,6 @@ void GCNSchedStage::revertScheduling() {
DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
}
-bool PreRARematStage::allUsesAvailableAt(const MachineInstr *InstToRemat,
- SlotIndex OriginalIdx,
- SlotIndex RematIdx) const {
-
- LiveIntervals *LIS = DAG.LIS;
- MachineRegisterInfo &MRI = DAG.MRI;
- OriginalIdx = OriginalIdx.getRegSlot(true);
- RematIdx = std::max(RematIdx, RematIdx.getRegSlot(true));
- for (const MachineOperand &MO : InstToRemat->operands()) {
- if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
- continue;
-
- if (!MO.getReg().isVirtual()) {
- // Do not attempt to reason about PhysRegs
- // TODO: better analysis of PhysReg livness
- if (!DAG.MRI.isConstantPhysReg(MO.getReg()) &&
- !DAG.TII->isIgnorableUse(MO))
- return false;
-
- // Constant PhysRegs and IgnorableUses are okay
- continue;
- }
-
- LiveInterval &LI = LIS->getInterval(MO.getReg());
- const VNInfo *OVNI = LI.getVNInfoAt(OriginalIdx);
- assert(OVNI);
-
- // Don't allow rematerialization immediately after the original def.
- // It would be incorrect if InstToRemat redefines the register.
- // See PR14098.
- if (SlotIndex::isSameInstr(OriginalIdx, RematIdx))
- return false;
-
- if (OVNI != LI.getVNInfoAt(RematIdx))
- return false;
-
- // Check that subrange is live at RematIdx.
- if (LI.hasSubRanges()) {
- const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
- unsigned SubReg = MO.getSubReg();
- LaneBitmask LM = SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
- : MRI.getMaxLaneMaskForVReg(MO.getReg());
- for (LiveInterval::SubRange &SR : LI.subranges()) {
- if ((SR.LaneMask & LM).none())
- continue;
- if (!SR.liveAt(RematIdx))
- return false;
-
- // Early exit if all used lanes are checked. No need to continue.
- LM &= ~SR.LaneMask;
- if (LM.none())
- break;
- }
- }
- }
- return true;
-}
-
bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() {
const Function &F = MF.getFunction();
@@ -1812,9 +1755,9 @@ bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() {
// Do not rematerialize an instruction if it uses registers that aren't
// available at its use. This ensures that we are not extending any live
// range while rematerializing.
- SlotIndex DefIdx = DAG.LIS->getInstructionIndex(DefMI);
SlotIndex UseIdx = DAG.LIS->getInstructionIndex(*UseMI).getRegSlot(true);
- if (!allUsesAvailableAt(&DefMI, DefIdx, UseIdx))
+ if (!VirtRegAuxInfo::allUsesAvailableAt(&DefMI, UseIdx, *DAG.LIS, DAG.MRI,
+ *DAG.TII))
continue;
REMAT_DEBUG(dbgs() << "Region " << I << ": remat instruction " << DefMI);
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index 06b9b64..8ea4267 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -496,12 +496,6 @@ private:
/// stage to their pre-stage values.
void finalizeGCNSchedStage() override;
- /// \p Returns true if all the uses in \p InstToRemat defined at \p
- /// OriginalIdx are live at \p RematIdx. This only checks liveness of virtual
- /// reg uses.
- bool allUsesAvailableAt(const MachineInstr *InstToRemat,
- SlotIndex OriginalIdx, SlotIndex RematIdx) const;
-
public:
bool initGCNSchedStage() override;
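
The deleted private helper duplicated rematerialization-legality logic that lives with the spill-weight code, so the stage now calls the shared static version; note the shared helper takes only the use index and derives the def slot itself. Its declaration as inferred from the call site above (an assumption; llvm/CodeGen/CalcSpillWeights.h is authoritative):

    // Assumed declaration, reconstructed from the arguments passed in
    // PreRARematStage::canIncreaseOccupancyOrReduceSpill():
    static bool allUsesAvailableAt(const MachineInstr *InstToRemat,
                                   SlotIndex UseIdx, const LiveIntervals &LIS,
                                   const MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII);
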
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index 7c5d4fc..e4b3528 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -924,6 +924,7 @@ bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
case TargetStackID::SGPRSpill:
return true;
case TargetStackID::ScalableVector:
+ case TargetStackID::ScalablePredicateVector:
case TargetStackID::WasmLocal:
return false;
}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1653008..f7265c5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -64,14 +64,6 @@ static cl::opt<bool> UseDivergentRegisterIndexing(
cl::desc("Use indirect register addressing for divergent indexes"),
cl::init(false));
-// TODO: This option should be removed once we switch to always using PTRADD in
-// the SelectionDAG.
-static cl::opt<bool> UseSelectionDAGPTRADD(
- "amdgpu-use-sdag-ptradd", cl::Hidden,
- cl::desc("Generate ISD::PTRADD nodes for 64-bit pointer arithmetic in the "
- "SelectionDAG ISel"),
- cl::init(false));
-
static bool denormalModeIsFlushAllF32(const MachineFunction &MF) {
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
return Info->getMode().FP32Denormals == DenormalMode::getPreserveSign();
@@ -11466,7 +11458,7 @@ static bool isNoUnsignedWrap(SDValue Addr) {
bool SITargetLowering::shouldPreservePtrArith(const Function &F,
EVT PtrVT) const {
- return UseSelectionDAGPTRADD && PtrVT == MVT::i64;
+ return PtrVT == MVT::i64;
}
bool SITargetLowering::canTransformPtrArithOutOfBounds(const Function &F,
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index f291191..76bfce8 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -418,15 +418,14 @@ public:
class SIInsertWaitcnts {
public:
const GCNSubtarget *ST;
+ const SIInstrInfo *TII = nullptr;
+ const SIRegisterInfo *TRI = nullptr;
+ const MachineRegisterInfo *MRI = nullptr;
InstCounterType SmemAccessCounter;
InstCounterType MaxCounter;
const unsigned *WaitEventMaskForInst;
private:
- const SIInstrInfo *TII = nullptr;
- const SIRegisterInfo *TRI = nullptr;
- const MachineRegisterInfo *MRI = nullptr;
-
DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
DenseMap<MachineBasicBlock *, bool> PreheadersToFlush;
MachineLoopInfo *MLI;
@@ -495,13 +494,6 @@ public:
bool isVMEMOrFlatVMEM(const MachineInstr &MI) const;
bool run(MachineFunction &MF);
- bool isForceEmitWaitcnt() const {
- for (auto T : inst_counter_types())
- if (ForceEmitWaitcnt[T])
- return true;
- return false;
- }
-
void setForceEmitWaitcnt() {
// For non-debug builds, ForceEmitWaitcnt has been initialized to false;
// For debug builds, get the debug counter info and adjust if need be
@@ -570,10 +562,6 @@ public:
return VmemReadMapping[getVmemType(Inst)];
}
- bool hasXcnt() const { return ST->hasWaitXCnt(); }
-
- bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
- bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
bool isVmemAccess(const MachineInstr &MI) const;
bool generateWaitcntInstBefore(MachineInstr &MI,
WaitcntBrackets &ScoreBrackets,
@@ -591,7 +579,6 @@ public:
WaitcntBrackets &ScoreBrackets);
bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
WaitcntBrackets &ScoreBrackets);
- static bool asynchronouslyWritesSCC(unsigned Opcode);
};
// This objects maintains the current score brackets of each wait counter, and
@@ -643,8 +630,6 @@ public:
bool merge(const WaitcntBrackets &Other);
RegInterval getRegInterval(const MachineInstr *MI,
- const MachineRegisterInfo *MRI,
- const SIRegisterInfo *TRI,
const MachineOperand &Op) const;
bool counterOutOfOrder(InstCounterType T) const;
@@ -662,9 +647,7 @@ public:
void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
void applyWaitcnt(InstCounterType T, unsigned Count);
void applyXcnt(const AMDGPU::Waitcnt &Wait);
- void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
- const MachineRegisterInfo *MRI, WaitEventType E,
- MachineInstr &MI);
+ void updateByEvent(WaitEventType E, MachineInstr &MI);
unsigned hasPendingEvent() const { return PendingEvents; }
unsigned hasPendingEvent(WaitEventType E) const {
@@ -773,10 +756,8 @@ private:
void setScoreByInterval(RegInterval Interval, InstCounterType CntTy,
unsigned Score);
- void setScoreByOperand(const MachineInstr *MI, const SIRegisterInfo *TRI,
- const MachineRegisterInfo *MRI,
- const MachineOperand &Op, InstCounterType CntTy,
- unsigned Val);
+ void setScoreByOperand(const MachineInstr *MI, const MachineOperand &Op,
+ InstCounterType CntTy, unsigned Val);
const SIInsertWaitcnts *Context;
@@ -833,12 +814,13 @@ public:
} // end anonymous namespace
RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
- const MachineRegisterInfo *MRI,
- const SIRegisterInfo *TRI,
const MachineOperand &Op) const {
if (Op.getReg() == AMDGPU::SCC)
return {SCC, SCC + 1};
+ const SIRegisterInfo *TRI = Context->TRI;
+ const MachineRegisterInfo *MRI = Context->MRI;
+
if (!TRI->isInAllocatableClass(Op.getReg()))
return {-1, -1};
@@ -903,11 +885,9 @@ void WaitcntBrackets::setScoreByInterval(RegInterval Interval,
}
void WaitcntBrackets::setScoreByOperand(const MachineInstr *MI,
- const SIRegisterInfo *TRI,
- const MachineRegisterInfo *MRI,
const MachineOperand &Op,
InstCounterType CntTy, unsigned Score) {
- RegInterval Interval = getRegInterval(MI, MRI, TRI, Op);
+ RegInterval Interval = getRegInterval(MI, Op);
setScoreByInterval(Interval, CntTy, Score);
}
@@ -939,10 +919,7 @@ bool WaitcntBrackets::hasPointSamplePendingVmemTypes(
return hasOtherPendingVmemTypes(Interval, VMEM_NOSAMPLER);
}
-void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
- const SIRegisterInfo *TRI,
- const MachineRegisterInfo *MRI,
- WaitEventType E, MachineInstr &Inst) {
+void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
InstCounterType T = eventCounter(Context->WaitEventMaskForInst, E);
unsigned UB = getScoreUB(T);
@@ -955,6 +932,10 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
PendingEvents |= 1 << E;
setScoreUB(T, CurrScore);
+ const SIRegisterInfo *TRI = Context->TRI;
+ const MachineRegisterInfo *MRI = Context->MRI;
+ const SIInstrInfo *TII = Context->TII;
+
if (T == EXP_CNT) {
// Put score on the source vgprs. If this is a store, just use those
// specific register(s).
@@ -962,59 +943,56 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
// All GDS operations must protect their address register (same as
// export).
if (const auto *AddrOp = TII->getNamedOperand(Inst, AMDGPU::OpName::addr))
- setScoreByOperand(&Inst, TRI, MRI, *AddrOp, EXP_CNT, CurrScore);
+ setScoreByOperand(&Inst, *AddrOp, EXP_CNT, CurrScore);
if (Inst.mayStore()) {
if (const auto *Data0 =
TII->getNamedOperand(Inst, AMDGPU::OpName::data0))
- setScoreByOperand(&Inst, TRI, MRI, *Data0, EXP_CNT, CurrScore);
+ setScoreByOperand(&Inst, *Data0, EXP_CNT, CurrScore);
if (const auto *Data1 =
TII->getNamedOperand(Inst, AMDGPU::OpName::data1))
- setScoreByOperand(&Inst, TRI, MRI, *Data1, EXP_CNT, CurrScore);
+ setScoreByOperand(&Inst, *Data1, EXP_CNT, CurrScore);
} else if (SIInstrInfo::isAtomicRet(Inst) && !SIInstrInfo::isGWS(Inst) &&
Inst.getOpcode() != AMDGPU::DS_APPEND &&
Inst.getOpcode() != AMDGPU::DS_CONSUME &&
Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
for (const MachineOperand &Op : Inst.all_uses()) {
if (TRI->isVectorRegister(*MRI, Op.getReg()))
- setScoreByOperand(&Inst, TRI, MRI, Op, EXP_CNT, CurrScore);
+ setScoreByOperand(&Inst, Op, EXP_CNT, CurrScore);
}
}
} else if (TII->isFLAT(Inst)) {
if (Inst.mayStore()) {
- setScoreByOperand(&Inst, TRI, MRI,
+ setScoreByOperand(&Inst,
*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
EXP_CNT, CurrScore);
} else if (SIInstrInfo::isAtomicRet(Inst)) {
- setScoreByOperand(&Inst, TRI, MRI,
+ setScoreByOperand(&Inst,
*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
EXP_CNT, CurrScore);
}
} else if (TII->isMIMG(Inst)) {
if (Inst.mayStore()) {
- setScoreByOperand(&Inst, TRI, MRI, Inst.getOperand(0), EXP_CNT,
- CurrScore);
+ setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore);
} else if (SIInstrInfo::isAtomicRet(Inst)) {
- setScoreByOperand(&Inst, TRI, MRI,
+ setScoreByOperand(&Inst,
*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
EXP_CNT, CurrScore);
}
} else if (TII->isMTBUF(Inst)) {
if (Inst.mayStore())
- setScoreByOperand(&Inst, TRI, MRI, Inst.getOperand(0), EXP_CNT,
- CurrScore);
+ setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore);
} else if (TII->isMUBUF(Inst)) {
if (Inst.mayStore()) {
- setScoreByOperand(&Inst, TRI, MRI, Inst.getOperand(0), EXP_CNT,
- CurrScore);
+ setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore);
} else if (SIInstrInfo::isAtomicRet(Inst)) {
- setScoreByOperand(&Inst, TRI, MRI,
+ setScoreByOperand(&Inst,
*TII->getNamedOperand(Inst, AMDGPU::OpName::data),
EXP_CNT, CurrScore);
}
} else if (TII->isLDSDIR(Inst)) {
// LDSDIR instructions attach the score to the destination.
- setScoreByOperand(&Inst, TRI, MRI,
+ setScoreByOperand(&Inst,
*TII->getNamedOperand(Inst, AMDGPU::OpName::vdst),
EXP_CNT, CurrScore);
} else {
@@ -1025,18 +1003,18 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
// score.
for (MachineOperand &DefMO : Inst.all_defs()) {
if (TRI->isVGPR(*MRI, DefMO.getReg())) {
- setScoreByOperand(&Inst, TRI, MRI, DefMO, EXP_CNT, CurrScore);
+ setScoreByOperand(&Inst, DefMO, EXP_CNT, CurrScore);
}
}
}
for (const MachineOperand &Op : Inst.all_uses()) {
if (TRI->isVectorRegister(*MRI, Op.getReg()))
- setScoreByOperand(&Inst, TRI, MRI, Op, EXP_CNT, CurrScore);
+ setScoreByOperand(&Inst, Op, EXP_CNT, CurrScore);
}
}
} else if (T == X_CNT) {
for (const MachineOperand &Op : Inst.all_uses())
- setScoreByOperand(&Inst, TRI, MRI, Op, T, CurrScore);
+ setScoreByOperand(&Inst, Op, T, CurrScore);
} else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ {
// Match the score to the destination registers.
//
@@ -1048,7 +1026,7 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
// Special cases where implicit register defs exists, such as M0 or VCC,
// but none with memory instructions.
for (const MachineOperand &Op : Inst.defs()) {
- RegInterval Interval = getRegInterval(&Inst, MRI, TRI, Op);
+ RegInterval Interval = getRegInterval(&Inst, Op);
if (T == LOAD_CNT || T == SAMPLE_CNT || T == BVH_CNT) {
if (Interval.first >= NUM_ALL_VGPRS)
continue;
@@ -1109,7 +1087,7 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
setRegScore(FIRST_LDS_VGPR, T, CurrScore);
}
- if (Context->asynchronouslyWritesSCC(Inst.getOpcode())) {
+ if (SIInstrInfo::isSBarrierSCCWrite(Inst.getOpcode())) {
setRegScore(SCC, T, CurrScore);
PendingSCCWrite = &Inst;
}
@@ -1831,12 +1809,6 @@ bool WaitcntGeneratorGFX12Plus::createNewWaitcnt(
return Modified;
}
-static bool readsVCCZ(const MachineInstr &MI) {
- unsigned Opc = MI.getOpcode();
- return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
- !MI.getOperand(1).isUndef();
-}
-
/// \returns true if the callee inserts an s_waitcnt 0 on function entry.
static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
// Currently all conventions wait, but this may not always be the case.
@@ -1871,26 +1843,24 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
assert(!MI.isMetaInstruction());
AMDGPU::Waitcnt Wait;
+ const unsigned Opc = MI.getOpcode();
// FIXME: This should have already been handled by the memory legalizer.
// Removing this currently doesn't affect any lit tests, but we need to
// verify that nothing was relying on this. The number of buffer invalidates
// being handled here should not be expanded.
- if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
- MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
- MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
- MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
- MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
+ if (Opc == AMDGPU::BUFFER_WBINVL1 || Opc == AMDGPU::BUFFER_WBINVL1_SC ||
+ Opc == AMDGPU::BUFFER_WBINVL1_VOL || Opc == AMDGPU::BUFFER_GL0_INV ||
+ Opc == AMDGPU::BUFFER_GL1_INV) {
Wait.LoadCnt = 0;
}
// All waits must be resolved at call return.
// NOTE: this could be improved with knowledge of all call sites or
// with knowledge of the called routines.
- if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
- MI.getOpcode() == AMDGPU::SI_RETURN ||
- MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN ||
- MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
+ if (Opc == AMDGPU::SI_RETURN_TO_EPILOG || Opc == AMDGPU::SI_RETURN ||
+ Opc == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN ||
+ Opc == AMDGPU::S_SETPC_B64_return ||
(MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false));
}
@@ -1902,8 +1872,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
// send a message to explicitly release all VGPRs before the stores have
// completed, but it is only safe to do this if there are no outstanding
// scratch stores.
- else if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
- MI.getOpcode() == AMDGPU::S_ENDPGM_SAVED) {
+ else if (Opc == AMDGPU::S_ENDPGM || Opc == AMDGPU::S_ENDPGM_SAVED) {
if (!WCG->isOptNone() &&
(MI.getMF()->getInfo<SIMachineFunctionInfo>()->isDynamicVGPREnabled() ||
(ST->getGeneration() >= AMDGPUSubtarget::GFX11 &&
@@ -1912,8 +1881,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
ReleaseVGPRInsts.insert(&MI);
}
// Resolve vm waits before gs-done.
- else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
- MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
+ else if ((Opc == AMDGPU::S_SENDMSG || Opc == AMDGPU::S_SENDMSGHALT) &&
ST->hasLegacyGeometry() &&
((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_PreGFX11_) ==
AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) {
@@ -1938,7 +1906,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
// Wait for any pending GDS instruction to complete before any
// "Always GDS" instruction.
- if (TII->isAlwaysGDS(MI.getOpcode()) && ScoreBrackets.hasPendingGDS())
+ if (TII->isAlwaysGDS(Opc) && ScoreBrackets.hasPendingGDS())
addWait(Wait, DS_CNT, ScoreBrackets.getPendingGDSWait());
if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
@@ -1950,7 +1918,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
const auto &CallAddrOp = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);
if (CallAddrOp.isReg()) {
RegInterval CallAddrOpInterval =
- ScoreBrackets.getRegInterval(&MI, MRI, TRI, CallAddrOp);
+ ScoreBrackets.getRegInterval(&MI, CallAddrOp);
ScoreBrackets.determineWait(SmemAccessCounter, CallAddrOpInterval,
Wait);
@@ -1958,13 +1926,13 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
if (const auto *RtnAddrOp =
TII->getNamedOperand(MI, AMDGPU::OpName::dst)) {
RegInterval RtnAddrOpInterval =
- ScoreBrackets.getRegInterval(&MI, MRI, TRI, *RtnAddrOp);
+ ScoreBrackets.getRegInterval(&MI, *RtnAddrOp);
ScoreBrackets.determineWait(SmemAccessCounter, RtnAddrOpInterval,
Wait);
}
}
- } else if (MI.getOpcode() == AMDGPU::S_BARRIER_WAIT) {
+ } else if (Opc == AMDGPU::S_BARRIER_WAIT) {
ScoreBrackets.tryClearSCCWriteEvent(&MI);
} else {
// FIXME: Should not be relying on memoperands.
@@ -2022,7 +1990,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
if (Op.isTied() && Op.isUse() && TII->doesNotReadTiedSource(MI))
continue;
- RegInterval Interval = ScoreBrackets.getRegInterval(&MI, MRI, TRI, Op);
+ RegInterval Interval = ScoreBrackets.getRegInterval(&MI, Op);
const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg());
if (IsVGPR) {
@@ -2061,7 +2029,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
ScoreBrackets.determineWait(SmemAccessCounter, Interval, Wait);
}
- if (hasXcnt() && Op.isDef())
+ if (ST->hasWaitXCnt() && Op.isDef())
ScoreBrackets.determineWait(X_CNT, Interval, Wait);
}
}
@@ -2079,18 +2047,17 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
//
// In all other cases, ensure safety by ensuring that there are no outstanding
// memory operations.
- if (MI.getOpcode() == AMDGPU::S_BARRIER &&
- !ST->hasAutoWaitcntBeforeBarrier() && !ST->supportsBackOffBarrier()) {
+ if (Opc == AMDGPU::S_BARRIER && !ST->hasAutoWaitcntBeforeBarrier() &&
+ !ST->supportsBackOffBarrier()) {
Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/true));
}
// TODO: Remove this work-around, enable the assert for Bug 457939
// after fixing the scheduler. Also, the Shader Compiler code is
// independent of target.
- if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
- if (ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
- Wait.DsCnt = 0;
- }
+ if (SIInstrInfo::isCBranchVCCZRead(MI) && ST->hasReadVCCZBug() &&
+ ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
+ Wait.DsCnt = 0;
}
// Verify that the wait is actually needed.
@@ -2165,19 +2132,19 @@ bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
}
// XCnt may be already consumed by a load wait.
- if (Wait.KmCnt == 0 && Wait.XCnt != ~0u &&
- !ScoreBrackets.hasPendingEvent(SMEM_GROUP))
- Wait.XCnt = ~0u;
+ if (Wait.XCnt != ~0u) {
+ if (Wait.KmCnt == 0 && !ScoreBrackets.hasPendingEvent(SMEM_GROUP))
+ Wait.XCnt = ~0u;
- if (Wait.LoadCnt == 0 && Wait.XCnt != ~0u &&
- !ScoreBrackets.hasPendingEvent(VMEM_GROUP))
- Wait.XCnt = ~0u;
+ if (Wait.LoadCnt == 0 && !ScoreBrackets.hasPendingEvent(VMEM_GROUP))
+ Wait.XCnt = ~0u;
- // Since the translation for VMEM addresses occur in-order, we can skip the
- // XCnt if the current instruction is of VMEM type and has a memory dependency
- // with another VMEM instruction in flight.
- if (Wait.XCnt != ~0u && isVmemAccess(*It))
- Wait.XCnt = ~0u;
+    // Since the translation for VMEM addresses occurs in-order, we can skip the
+ // XCnt if the current instruction is of VMEM type and has a memory
+ // dependency with another VMEM instruction in flight.
+ if (isVmemAccess(*It))
+ Wait.XCnt = ~0u;
+ }
if (WCG->createNewWaitcnt(Block, It, Wait))
Modified = true;
@@ -2185,75 +2152,11 @@ bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
return Modified;
}
-// This is a flat memory operation. Check to see if it has memory tokens other
-// than LDS. Other address spaces supported by flat memory operations involve
-// global memory.
-bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
- assert(TII->isFLAT(MI));
-
- // All flat instructions use the VMEM counter except prefetch.
- if (!TII->usesVM_CNT(MI))
- return false;
-
- // If there are no memory operands then conservatively assume the flat
- // operation may access VMEM.
- if (MI.memoperands_empty())
- return true;
-
- // See if any memory operand specifies an address space that involves VMEM.
- // Flat operations only supported FLAT, LOCAL (LDS), or address spaces
- // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION
- // (GDS) address space is not supported by flat operations. Therefore, simply
- // return true unless only the LDS address space is found.
- for (const MachineMemOperand *Memop : MI.memoperands()) {
- unsigned AS = Memop->getAddrSpace();
- assert(AS != AMDGPUAS::REGION_ADDRESS);
- if (AS != AMDGPUAS::LOCAL_ADDRESS)
- return true;
- }
-
- return false;
-}
-
-// This is a flat memory operation. Check to see if it has memory tokens for
-// either LDS or FLAT.
-bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
- assert(TII->isFLAT(MI));
-
- // Flat instruction such as SCRATCH and GLOBAL do not use the lgkm counter.
- if (!TII->usesLGKM_CNT(MI))
- return false;
-
- // If in tgsplit mode then there can be no use of LDS.
- if (ST->isTgSplitEnabled())
- return false;
-
- // If there are no memory operands then conservatively assume the flat
- // operation may access LDS.
- if (MI.memoperands_empty())
- return true;
-
- // See if any memory operand specifies an address space that involves LDS.
- for (const MachineMemOperand *Memop : MI.memoperands()) {
- unsigned AS = Memop->getAddrSpace();
- if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
- return true;
- }
-
- return false;
-}
-
bool SIInsertWaitcnts::isVmemAccess(const MachineInstr &MI) const {
- return (TII->isFLAT(MI) && mayAccessVMEMThroughFlat(MI)) ||
+ return (TII->isFLAT(MI) && TII->mayAccessVMEMThroughFlat(MI)) ||
(TII->isVMEM(MI) && !AMDGPU::getMUBUFIsBufferInv(MI.getOpcode()));
}
-static bool isGFX12CacheInvOrWBInst(MachineInstr &Inst) {
- auto Opc = Inst.getOpcode();
- return Opc == AMDGPU::GLOBAL_INV || Opc == AMDGPU::GLOBAL_WB ||
- Opc == AMDGPU::GLOBAL_WBINV;
-}
-
// Return true if the next instruction is S_ENDPGM, following fallthrough
// blocks if necessary.
bool SIInsertWaitcnts::isNextENDPGM(MachineBasicBlock::instr_iterator It,
@@ -2324,16 +2227,15 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
if (TII->isAlwaysGDS(Inst.getOpcode()) ||
TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
- ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
- ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
+ ScoreBrackets->updateByEvent(GDS_ACCESS, Inst);
+ ScoreBrackets->updateByEvent(GDS_GPR_LOCK, Inst);
ScoreBrackets->setPendingGDS();
} else {
- ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
+ ScoreBrackets->updateByEvent(LDS_ACCESS, Inst);
}
} else if (TII->isFLAT(Inst)) {
- if (isGFX12CacheInvOrWBInst(Inst)) {
- ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),
- Inst);
+ if (SIInstrInfo::isGFX12CacheInvOrWBInst(Inst.getOpcode())) {
+ ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);
return;
}
@@ -2341,16 +2243,15 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
int FlatASCount = 0;
- if (mayAccessVMEMThroughFlat(Inst)) {
+ if (TII->mayAccessVMEMThroughFlat(Inst)) {
++FlatASCount;
IsVMEMAccess = true;
- ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),
- Inst);
+ ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);
}
- if (mayAccessLDSThroughFlat(Inst)) {
+ if (TII->mayAccessLDSThroughFlat(Inst)) {
++FlatASCount;
- ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
+ ScoreBrackets->updateByEvent(LDS_ACCESS, Inst);
}
// This is a flat memory operation that accesses both VMEM and LDS, so note it
@@ -2361,16 +2262,15 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
} else if (SIInstrInfo::isVMEM(Inst) &&
!llvm::AMDGPU::getMUBUFIsBufferInv(Inst.getOpcode())) {
IsVMEMAccess = true;
- ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),
- Inst);
+ ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst);
if (ST->vmemWriteNeedsExpWaitcnt() &&
(Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))) {
- ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
+ ScoreBrackets->updateByEvent(VMW_GPR_LOCK, Inst);
}
} else if (TII->isSMRD(Inst)) {
IsSMEMAccess = true;
- ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
+ ScoreBrackets->updateByEvent(SMEM_ACCESS, Inst);
} else if (Inst.isCall()) {
if (callWaitsOnFunctionReturn(Inst)) {
// Act as a wait on everything
@@ -2382,45 +2282,45 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
}
} else if (SIInstrInfo::isLDSDIR(Inst)) {
- ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_LDS_ACCESS, Inst);
+ ScoreBrackets->updateByEvent(EXP_LDS_ACCESS, Inst);
} else if (TII->isVINTERP(Inst)) {
int64_t Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
ScoreBrackets->applyWaitcnt(EXP_CNT, Imm);
} else if (SIInstrInfo::isEXP(Inst)) {
unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
- ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
+ ScoreBrackets->updateByEvent(EXP_PARAM_ACCESS, Inst);
else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
- ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
+ ScoreBrackets->updateByEvent(EXP_POS_ACCESS, Inst);
else
- ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
- } else if (asynchronouslyWritesSCC(Inst.getOpcode())) {
- ScoreBrackets->updateByEvent(TII, TRI, MRI, SCC_WRITE, Inst);
+ ScoreBrackets->updateByEvent(EXP_GPR_LOCK, Inst);
+ } else if (SIInstrInfo::isSBarrierSCCWrite(Inst.getOpcode())) {
+ ScoreBrackets->updateByEvent(SCC_WRITE, Inst);
} else {
switch (Inst.getOpcode()) {
case AMDGPU::S_SENDMSG:
case AMDGPU::S_SENDMSG_RTN_B32:
case AMDGPU::S_SENDMSG_RTN_B64:
case AMDGPU::S_SENDMSGHALT:
- ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
+ ScoreBrackets->updateByEvent(SQ_MESSAGE, Inst);
break;
case AMDGPU::S_MEMTIME:
case AMDGPU::S_MEMREALTIME:
case AMDGPU::S_GET_BARRIER_STATE_M0:
case AMDGPU::S_GET_BARRIER_STATE_IMM:
- ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
+ ScoreBrackets->updateByEvent(SMEM_ACCESS, Inst);
break;
}
}
- if (!hasXcnt())
+ if (!ST->hasWaitXCnt())
return;
if (IsVMEMAccess)
- ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_GROUP, Inst);
+ ScoreBrackets->updateByEvent(VMEM_GROUP, Inst);
if (IsSMEMAccess)
- ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_GROUP, Inst);
+ ScoreBrackets->updateByEvent(SMEM_GROUP, Inst);
}
bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
@@ -2478,9 +2378,8 @@ bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
unsigned OldEventsHasSCCWrite = OldEvents & (1 << SCC_WRITE);
if (!OldEventsHasSCCWrite) {
PendingSCCWrite = Other.PendingSCCWrite;
- } else {
- if (PendingSCCWrite != Other.PendingSCCWrite)
- PendingSCCWrite = nullptr;
+ } else if (PendingSCCWrite != Other.PendingSCCWrite) {
+ PendingSCCWrite = nullptr;
}
}
}
@@ -2516,12 +2415,6 @@ static bool isWaitInstr(MachineInstr &Inst) {
counterTypeForInstr(Opcode).has_value();
}
-bool SIInsertWaitcnts::asynchronouslyWritesSCC(unsigned Opcode) {
- return Opcode == AMDGPU::S_BARRIER_LEAVE ||
- Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM ||
- Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_M0;
-}
-
// Generate s_waitcnt instructions where needed.
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
MachineBasicBlock &Block,
@@ -2578,7 +2471,7 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
OldWaitcntInstr = nullptr;
// Restore vccz if it's not known to be correct already.
- bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst);
+ bool RestoreVCCZ = !VCCZCorrect && SIInstrInfo::isCBranchVCCZRead(Inst);
// Don't examine operands unless we need to track vccz correctness.
if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
@@ -2701,7 +2594,7 @@ bool SIInsertWaitcnts::isPreheaderToFlush(
bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {
if (SIInstrInfo::isFLAT(MI))
- return mayAccessVMEMThroughFlat(MI);
+ return TII->mayAccessVMEMThroughFlat(MI);
return SIInstrInfo::isVMEM(MI);
}
@@ -2724,15 +2617,14 @@ bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML,
for (MachineBasicBlock *MBB : ML->blocks()) {
for (MachineInstr &MI : *MBB) {
if (isVMEMOrFlatVMEM(MI)) {
- if (MI.mayLoad())
- HasVMemLoad = true;
- if (MI.mayStore())
- HasVMemStore = true;
+ HasVMemLoad |= MI.mayLoad();
+ HasVMemStore |= MI.mayStore();
}
+
for (const MachineOperand &Op : MI.all_uses()) {
if (Op.isDebug() || !TRI->isVectorRegister(*MRI, Op.getReg()))
continue;
- RegInterval Interval = Brackets.getRegInterval(&MI, MRI, TRI, Op);
+ RegInterval Interval = Brackets.getRegInterval(&MI, Op);
// Vgpr use
for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
// If we find a register that is loaded inside the loop, 1. and 2.
@@ -2757,7 +2649,7 @@ bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML,
// VMem load vgpr def
if (isVMEMOrFlatVMEM(MI) && MI.mayLoad()) {
for (const MachineOperand &Op : MI.all_defs()) {
- RegInterval Interval = Brackets.getRegInterval(&MI, MRI, TRI, Op);
+ RegInterval Interval = Brackets.getRegInterval(&MI, Op);
for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
// If we find a register that is loaded inside the loop, 1. and 2.
// are invalidated and we can exit.
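
Most of the churn in this file is one mechanical change: WaitcntBrackets already holds a back-pointer to the pass (Context), so TII/TRI/MRI move into the pass's public state and stop being threaded through every call. The pattern in miniature (illustrative shapes, not the file's real types):

    // Before: void update(const TII *, const TRI *, const MRI *, Event, MI &)
    // After:  void update(Event, MI &); shared state comes from Context.
    struct Pass { int Shared = 0; };
    struct Brackets {
      const Pass *Context; // set once at construction
      explicit Brackets(const Pass *P) : Context(P) {}
      void update(int Event) {
        int S = Context->Shared; // fetched here, not passed per call
        (void)S; (void)Event;
      }
    };
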
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 044ea86..56435a5 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4344,6 +4344,59 @@ bool SIInstrInfo::mayAccessScratchThroughFlat(const MachineInstr &MI) const {
});
}
+bool SIInstrInfo::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
+ assert(isFLAT(MI));
+
+ // All flat instructions use the VMEM counter except prefetch.
+ if (!usesVM_CNT(MI))
+ return false;
+
+ // If there are no memory operands then conservatively assume the flat
+ // operation may access VMEM.
+ if (MI.memoperands_empty())
+ return true;
+
+ // See if any memory operand specifies an address space that involves VMEM.
+  // Flat operations only support FLAT, LOCAL (LDS), or address spaces
+ // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION
+ // (GDS) address space is not supported by flat operations. Therefore, simply
+ // return true unless only the LDS address space is found.
+ for (const MachineMemOperand *Memop : MI.memoperands()) {
+ unsigned AS = Memop->getAddrSpace();
+ assert(AS != AMDGPUAS::REGION_ADDRESS);
+ if (AS != AMDGPUAS::LOCAL_ADDRESS)
+ return true;
+ }
+
+ return false;
+}
+
+bool SIInstrInfo::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
+ assert(isFLAT(MI));
+
+  // Flat instructions such as SCRATCH and GLOBAL do not use the lgkm counter.
+ if (!usesLGKM_CNT(MI))
+ return false;
+
+ // If in tgsplit mode then there can be no use of LDS.
+ if (ST.isTgSplitEnabled())
+ return false;
+
+ // If there are no memory operands then conservatively assume the flat
+ // operation may access LDS.
+ if (MI.memoperands_empty())
+ return true;
+
+ // See if any memory operand specifies an address space that involves LDS.
+ for (const MachineMemOperand *Memop : MI.memoperands()) {
+ unsigned AS = Memop->getAddrSpace();
+ if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
+ return true;
+ }
+
+ return false;
+}
+
bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) {
// Skip the full operand and register alias search modifiesRegister
// does. There's only a handful of instructions that touch this, it's only an
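
With the two flat-access queries now on SIInstrInfo, any pass holding a TII handle can ask the same question SIInsertWaitcnts does. A minimal caller mirroring isVmemAccess from the previous file (a hypothetical free function, sketch only):

    bool flatMayTouchVMEM(const SIInstrInfo &TII, const MachineInstr &MI) {
      return TII.isFLAT(MI) && TII.mayAccessVMEMThroughFlat(MI);
    }
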
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index c2252af..a21089f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -688,6 +688,12 @@ public:
/// to not hit scratch.
bool mayAccessScratchThroughFlat(const MachineInstr &MI) const;
+ /// \returns true for FLAT instructions that can access VMEM.
+ bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
+
+ /// \returns true for FLAT instructions that can access LDS.
+ bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
+
static bool isBlockLoadStore(uint16_t Opcode) {
switch (Opcode) {
case AMDGPU::SI_BLOCK_SPILL_V1024_SAVE:
@@ -748,6 +754,18 @@ public:
return isLDSDMA(MI) && MI.getOpcode() != AMDGPU::BUFFER_STORE_LDS_DWORD;
}
+ static bool isSBarrierSCCWrite(unsigned Opcode) {
+ return Opcode == AMDGPU::S_BARRIER_LEAVE ||
+ Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM ||
+ Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_M0;
+ }
+
+ static bool isCBranchVCCZRead(const MachineInstr &MI) {
+ unsigned Opc = MI.getOpcode();
+ return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
+ !MI.getOperand(1).isUndef();
+ }
+
static bool isWQM(const MachineInstr &MI) {
return MI.getDesc().TSFlags & SIInstrFlags::WQM;
}
@@ -1010,6 +1028,11 @@ public:
Opcode == AMDGPU::DS_GWS_BARRIER;
}
+ static bool isGFX12CacheInvOrWBInst(unsigned Opc) {
+ return Opc == AMDGPU::GLOBAL_INV || Opc == AMDGPU::GLOBAL_WB ||
+ Opc == AMDGPU::GLOBAL_WBINV;
+ }
+
static bool isF16PseudoScalarTrans(unsigned Opcode) {
return Opcode == AMDGPU::V_S_EXP_F16_e64 ||
Opcode == AMDGPU::V_S_LOG_F16_e64 ||
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 8f1dd62..5630580 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -1163,6 +1163,22 @@ def VS_64_Lo256 : SIRegisterClass<"AMDGPU", VReg_64.RegTypes, 32,
let HasSGPR = 1;
let Size = 64;
}
+
+def VS_128 : SIRegisterClass<"AMDGPU", VReg_128.RegTypes, 32,
+ (add VReg_128, SReg_128)> {
+ let isAllocatable = 0;
+ let HasVGPR = 1;
+ let HasSGPR = 1;
+ let Size = 128;
+}
+
+def VS_128_Align2 : SIRegisterClass<"AMDGPU", VReg_128.RegTypes, 32,
+ (add VReg_128_Align2, SReg_128)> {
+ let isAllocatable = 0;
+ let HasVGPR = 1;
+ let HasSGPR = 1;
+ let Size = 128;
+}
} // End GeneratePressureSet = 0
// Define a register tuple class, along with one requiring an even
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index b3fd8c7..84287b6 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -352,10 +352,12 @@ def S_XNOR_SAVEEXEC_B64 : SOP1_64 <"s_xnor_saveexec_b64">;
} // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC]
+let Defs = [SCC] in {
def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32",
[(set i32:$sdst, (int_amdgcn_s_quadmask i32:$src0))]>;
def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64",
[(set i64:$sdst, (int_amdgcn_s_quadmask i64:$src0))]>;
+}
let Uses = [M0] in {
def S_MOVRELS_B32 : SOP1_32R <"s_movrels_b32">;
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 77df721..54f57e0 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -314,9 +314,10 @@ let SubtargetPredicate = HasGFX950Insts, OtherPredicates = [HasBF16ConversionIns
defm V_CVT_F32_BF16 : VOP1Inst_t16 <"v_cvt_f32_bf16", VOP_F32_BF16>;
}
let SubtargetPredicate = isGFX1250Plus, OtherPredicates = [HasBF16ConversionInsts] in {
- defm V_CVT_F32_BF16_gfx1250 : VOP1Inst_t16_with_profiles <"v_cvt_f32_bf16_gfx1250", VOP_F32_BF16,
- VOPProfile_CVT_F32_BF16_gfx1250_t16,
- VOPProfile_CVT_F32_BF16_gfx1250_fake16>;
+ let True16Predicate = UseRealTrue16Insts in
+ defm V_CVT_F32_BF16_gfx1250_t16 : VOP1Inst <"V_CVT_F32_BF16_gfx1250_t16", VOPProfile_CVT_F32_BF16_gfx1250_t16>;
+ let True16Predicate = UseFakeTrue16Insts in
+ defm V_CVT_F32_BF16_gfx1250_fake16 : VOP1Inst <"V_CVT_F32_BF16_gfx1250_fake16", VOPProfile_CVT_F32_BF16_gfx1250_fake16>;
}
let ReadsModeReg = 0, mayRaiseFPException = 0 in {
@@ -899,6 +900,7 @@ class VOP1_DPP16_Gen<bits<8> op, VOP1_DPP_Pseudo ps, GFXGen Gen, VOPProfile p =
let DecoderNamespace = Gen.DecoderNamespace;
let OtherPredicates = !listconcat(ps.OtherPredicates,
!if(p.HasExt64BitDPP, [HasDPALU_DPP], []));
+ let True16Predicate = ps.True16Predicate;
}
class VOP1_DPP8<bits<8> op, VOP1_Pseudo ps, VOPProfile p = ps.Pfl> :
@@ -921,6 +923,7 @@ class VOP1_DPP8_Gen<bits<8> op, VOP1_Pseudo ps, GFXGen Gen, VOPProfile p = ps.Pf
VOP1_DPP8<op, ps, p> {
let AssemblerPredicate = Gen.AssemblerPredicate;
let DecoderNamespace = Gen.DecoderNamespace;
+ let True16Predicate = ps.True16Predicate;
}
//===----------------------------------------------------------------------===//
@@ -1149,7 +1152,7 @@ defm V_TANH_F16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x01f>;
defm V_PERMLANE16_SWAP_B32 : VOP1_Real_OpSelIsDPP_gfx1250<0x049>;
defm V_TANH_BF16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x04a>;
defm V_PRNG_B32 : VOP1_Real_FULL<GFX1250Gen, 0x04b>;
-defm V_CVT_F32_BF16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x072, "v_cvt_f32_bf16", "V_CVT_F32_BF16_gfx1250">;
+defm V_CVT_F32_BF16_gfx1250 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x072, "v_cvt_f32_bf16">;
defm V_SAT_PK4_I4_I8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x073>;
defm V_SAT_PK4_U4_U8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x074>;
defm V_CVT_PK_F16_FP8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x075>;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index fa130a1..26ff54c 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -775,6 +775,16 @@ class VectorType;
bool shouldFoldConstantShiftPairToMask(const SDNode *N,
CombineLevel Level) const override;
+    /// Return true if it is profitable to fold a mask into a variable shift pair.
+ bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
+ EVT VT = Y.getValueType();
+
+ if (VT.isVector())
+ return false;
+
+ return VT.getScalarSizeInBits() <= 32;
+ }
+
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
unsigned SelectOpcode, SDValue X,
SDValue Y) const override;
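
The new ARM override opts scalar types of 32 bits or less into the DAG combine that trades a variable mask for a shift pair. The two equivalent forms, written out in plain C++ (an assumption about which pattern the hook governs, based on its use elsewhere in LLVM; valid for y below the bit width):

    #include <cstdint>

    // Clearing the low y bits of x: mask form vs. shift-pair form.
    uint32_t maskForm(uint32_t x, unsigned y) { return x & (~0u << y); }
    uint32_t shiftPairForm(uint32_t x, unsigned y) { return (x >> y) << y; }
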
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 3329bea..58bc338 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -225,7 +225,11 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
(isTargetDarwin() || DM == DenormalMode::getPreserveSign()))
HasNEONForFP = true;
- if (isRWPI())
+ const ARM::ArchKind Arch = ARM::parseArch(TargetTriple.getArchName());
+ if (isRWPI() ||
+ (isTargetIOS() &&
+ (Arch == ARM::ArchKind::ARMV6K || Arch == ARM::ArchKind::ARMV6) &&
+ TargetTriple.isOSVersionLT(3, 0)))
ReserveR9 = true;
// If MVEVectorCostFactor is still 0 (has not been set to anything else), default it to 2
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 45d194e..939841a 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2804,6 +2804,7 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::V6_vL32b_nt_cur_npred_ai:
case Hexagon::V6_vL32b_nt_tmp_pred_ai:
case Hexagon::V6_vL32b_nt_tmp_npred_ai:
+ case Hexagon::V6_vS32Ub_npred_ai:
case Hexagon::V6_vgathermh_pseudo:
case Hexagon::V6_vgathermw_pseudo:
case Hexagon::V6_vgathermhw_pseudo:
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 3ac7c28..8c21746 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -638,6 +638,11 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// No support for these operations with v2f32/v2i32
setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand);
+
+ setOperationAction(ISD::TRUNCATE, MVT::v2i16, Expand);
+ setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND},
+ MVT::v2i32, Expand);
+
// Need custom lowering in case the index is dynamic.
if (STI.hasF32x2Instructions())
setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32},
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index d4d9e54..4105618 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -46,6 +46,8 @@ private:
MachineBasicBlock::iterator &NextMBBI);
bool expandCCOp(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI);
+ bool expandCCOpToCMov(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI);
bool expandVMSET_VMCLR(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, unsigned Opcode);
bool expandMV_FPR16INX(MachineBasicBlock &MBB,
@@ -178,6 +180,9 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI) {
+  // First try expanding to a conditional move rather than a branch+mv.
+ if (expandCCOpToCMov(MBB, MBBI))
+ return true;
MachineFunction *MF = MBB.getParent();
MachineInstr &MI = *MBBI;
@@ -277,6 +282,86 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB,
return true;
}
+bool RISCVExpandPseudo::expandCCOpToCMov(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) {
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+
+ if (MI.getOpcode() != RISCV::PseudoCCMOVGPR &&
+ MI.getOpcode() != RISCV::PseudoCCMOVGPRNoX0)
+ return false;
+
+ if (!STI->hasVendorXqcicm())
+ return false;
+
+ // FIXME: Would be wonderful to support LHS=X0, but not very easy.
+ if (MI.getOperand(1).getReg() == RISCV::X0 ||
+ MI.getOperand(4).getReg() == RISCV::X0 ||
+ MI.getOperand(5).getReg() == RISCV::X0)
+ return false;
+
+ auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
+
+ unsigned CMovOpcode, CMovIOpcode;
+ switch (CC) {
+ default:
+ llvm_unreachable("Unhandled CC");
+ case RISCVCC::COND_EQ:
+ CMovOpcode = RISCV::QC_MVEQ;
+ CMovIOpcode = RISCV::QC_MVEQI;
+ break;
+ case RISCVCC::COND_NE:
+ CMovOpcode = RISCV::QC_MVNE;
+ CMovIOpcode = RISCV::QC_MVNEI;
+ break;
+ case RISCVCC::COND_LT:
+ CMovOpcode = RISCV::QC_MVLT;
+ CMovIOpcode = RISCV::QC_MVLTI;
+ break;
+ case RISCVCC::COND_GE:
+ CMovOpcode = RISCV::QC_MVGE;
+ CMovIOpcode = RISCV::QC_MVGEI;
+ break;
+ case RISCVCC::COND_LTU:
+ CMovOpcode = RISCV::QC_MVLTU;
+ CMovIOpcode = RISCV::QC_MVLTUI;
+ break;
+ case RISCVCC::COND_GEU:
+ CMovOpcode = RISCV::QC_MVGEU;
+ CMovIOpcode = RISCV::QC_MVGEUI;
+ break;
+ }
+
+ if (MI.getOperand(2).getReg() == RISCV::X0) {
+ // $dst = PseudoCCMOVGPR $lhs, X0, $cc, $falsev (=$dst), $truev
+ // $dst = PseudoCCMOVGPRNoX0 $lhs, X0, $cc, $falsev (=$dst), $truev
+ // =>
+ // $dst = QC_MVccI $falsev (=$dst), $lhs, 0, $truev
+ BuildMI(MBB, MBBI, DL, TII->get(CMovIOpcode))
+ .addDef(MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(4).getReg())
+ .addReg(MI.getOperand(1).getReg())
+ .addImm(0)
+ .addReg(MI.getOperand(5).getReg());
+
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // $dst = PseudoCCMOVGPR $lhs, $rhs, $cc, $falsev (=$dst), $truev
+ // $dst = PseudoCCMOVGPRNoX0 $lhs, $rhs, $cc, $falsev (=$dst), $truev
+ // =>
+ // $dst = QC_MVcc $falsev (=$dst), $lhs, $rhs, $truev
+ BuildMI(MBB, MBBI, DL, TII->get(CMovOpcode))
+ .addDef(MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(4).getReg())
+ .addReg(MI.getOperand(1).getReg())
+ .addReg(MI.getOperand(2).getReg())
+ .addReg(MI.getOperand(5).getReg());
+ MI.eraseFromParent();
+ return true;
+}
+
bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned Opcode) {
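
As the expansion comments above show, the Xqcicm moves are destructive: the destination is tied to the false value and is only overwritten when the condition holds. A reference model of the register form (illustrative C++; qcMveq is a hypothetical name):

    #include <cstdint>

    // qc.mveq rd, rs1, rs2, rs3: rd = (rs1 == rs2) ? rs3 : rd, i.e. rd
    // keeps the $falsev it was tied to unless the comparison succeeds.
    uint32_t qcMveq(uint32_t Rd, uint32_t Rs1, uint32_t Rs2, uint32_t Rs3) {
      return Rs1 == Rs2 ? Rs3 : Rd;
    }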
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 06ce917..7d4535a 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -2395,6 +2395,7 @@ bool RISCVFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
case TargetStackID::NoAlloc:
case TargetStackID::SGPRSpill:
case TargetStackID::WasmLocal:
+ case TargetStackID::ScalablePredicateVector:
return false;
}
llvm_unreachable("Invalid TargetStackID::Value");
diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
index 2e5f30f..cf6f83a 100644
--- a/llvm/lib/Target/RISCV/RISCVGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -109,16 +109,17 @@ def : LdPat<extloadi8, LBU, i16>; // Prefer unsigned due to no c.lb in Zcb.
def : StPat<truncstorei8, SB, GPR, i16>;
let Predicates = [HasAtomicLdSt] in {
- def : LdPat<atomic_load_aext_8, LB, i16>;
- def : LdPat<atomic_load_nonext_16, LH, i16>;
+ // Prefer unsigned due to no c.lb in Zcb.
+ def : LdPat<relaxed_load<atomic_load_aext_8>, LBU, i16>;
+ def : LdPat<relaxed_load<atomic_load_nonext_16>, LH, i16>;
- def : StPat<atomic_store_8, SB, GPR, i16>;
- def : StPat<atomic_store_16, SH, GPR, i16>;
+ def : StPat<relaxed_store<atomic_store_8>, SB, GPR, i16>;
+ def : StPat<relaxed_store<atomic_store_16>, SH, GPR, i16>;
}
let Predicates = [HasAtomicLdSt, IsRV64] in {
- def : LdPat<atomic_load_nonext_32, LW, i32>;
- def : StPat<atomic_store_32, SW, GPR, i32>;
+ // Load pattern is in RISCVInstrInfoA.td and shared with RV32.
+ def : StPat<relaxed_store<atomic_store_32>, SW, GPR, i32>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 90e1c47a..6a6ead2 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -70,6 +70,10 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
return RISCVII::getSEWOpNum(MI.getDesc());
}
+static unsigned getVecPolicyOpNum(const MachineInstr &MI) {
+ return RISCVII::getVecPolicyOpNum(MI.getDesc());
+}
+
/// Get the EEW for a load or store instruction. Return std::nullopt if MI is
/// not a load or store which ignores SEW.
static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -986,7 +990,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
// If there is a policy operand, use it.
if (RISCVII::hasVecPolicyOp(TSFlags)) {
- const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
+ const MachineOperand &Op = MI.getOperand(getVecPolicyOpNum(MI));
uint64_t Policy = Op.getImm();
assert(Policy <=
(RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) &&
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 70b6c7e..1e6b04f8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -3793,6 +3793,11 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
return false;
// Operands 1 and 2 are commutable, if we switch the opcode.
return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
+ case RISCV::QC_SELECTIEQ:
+ case RISCV::QC_SELECTINE:
+ case RISCV::QC_SELECTIIEQ:
+ case RISCV::QC_SELECTIINE:
+ return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
case RISCV::QC_MVEQ:
case RISCV::QC_MVNE:
case RISCV::QC_MVLT:
@@ -4018,6 +4023,11 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, OpIdx1,
OpIdx2);
}
+ case RISCV::QC_SELECTIEQ:
+ case RISCV::QC_SELECTINE:
+ case RISCV::QC_SELECTIIEQ:
+ case RISCV::QC_SELECTIINE:
+ return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
case RISCV::QC_MVEQ:
case RISCV::QC_MVNE:
case RISCV::QC_MVLT:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
index 59f5aeb..25accd9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -165,23 +165,23 @@ class seq_cst_store<PatFrag base>
// any ordering. This is necessary because AtomicExpandPass has added fences to
// atomic load/stores and changed them to unordered ones.
let Predicates = [HasAtomicLdSt] in {
- def : LdPat<relaxed_load<atomic_load_asext_8>, LB>;
+ // Use unsigned for aext due to no c.lb in Zcb.
+ def : LdPat<relaxed_load<atomic_load_sext_8>, LB>;
+ def : LdPat<relaxed_load<atomic_load_azext_8>, LBU>;
def : LdPat<relaxed_load<atomic_load_asext_16>, LH>;
- def : LdPat<relaxed_load<atomic_load_zext_8>, LBU>;
- def : LdPat<relaxed_load<atomic_load_zext_16>, LHU>;
+ def : LdPat<relaxed_load<atomic_load_zext_16>, LHU>;
def : StPat<relaxed_store<atomic_store_8>, SB, GPR, XLenVT>;
def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>;
def : StPat<relaxed_store<atomic_store_32>, SW, GPR, XLenVT>;
-}
-let Predicates = [HasAtomicLdSt, IsRV32] in {
- def : LdPat<relaxed_load<atomic_load_nonext_32>, LW>;
+ // Used by GISel for RV32 and RV64.
+ def : LdPat<relaxed_load<atomic_load_nonext_32>, LW, i32>;
}
let Predicates = [HasAtomicLdSt, IsRV64] in {
- def : LdPat<relaxed_load<atomic_load_asext_32>, LW>;
- def : LdPat<relaxed_load<atomic_load_zext_32>, LWU>;
+ def : LdPat<relaxed_load<atomic_load_asext_32>, LW, i64>;
+ def : LdPat<relaxed_load<atomic_load_zext_32>, LWU, i64>;
def : LdPat<relaxed_load<atomic_load_nonext_64>, LD, i64>;
def : StPat<relaxed_store<atomic_store_64>, SD, GPR, i64>;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4eb9a3be..298d35a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -345,7 +345,7 @@ defset list<VTypeInfo> AllVectors = {
}
}
- defset list<VTypeInfo> AllFloatAndBFloatVectors = {
+ defset list<VTypeInfo> AllFloatAndBF16Vectors = {
defset list<VTypeInfo> AllFloatVectors = {
defset list<VTypeInfo> NoGroupFloatVectors = {
defset list<VTypeInfo> FractionalGroupFloatVectors = {
@@ -382,16 +382,16 @@ defset list<VTypeInfo> AllVectors = {
}
}
- defset list<VTypeInfo> AllBFloatVectors = {
- defset list<VTypeInfo> NoGroupBFloatVectors = {
- defset list<VTypeInfo> FractionalGroupBFloatVectors = {
+ defset list<VTypeInfo> AllBF16Vectors = {
+ defset list<VTypeInfo> NoGroupBF16Vectors = {
+ defset list<VTypeInfo> FractionalGroupBF16Vectors = {
def VBF16MF4: VTypeInfo<vbfloat16mf4_t, vbool64_t, 16, V_MF4, bf16, FPR16>;
def VBF16MF2: VTypeInfo<vbfloat16mf2_t, vbool32_t, 16, V_MF2, bf16, FPR16>;
}
def VBF16M1: VTypeInfo<vbfloat16m1_t, vbool16_t, 16, V_M1, bf16, FPR16>;
}
- defset list<GroupVTypeInfo> GroupBFloatVectors = {
+ defset list<GroupVTypeInfo> GroupBF16Vectors = {
def VBF16M2: GroupVTypeInfo<vbfloat16m2_t, vbfloat16m1_t, vbool8_t, 16,
V_M2, bf16, FPR16>;
def VBF16M4: GroupVTypeInfo<vbfloat16m4_t, vbfloat16m1_t, vbool4_t, 16,
@@ -542,7 +542,7 @@ defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
def : VTypeInfoToWide<VI32M4, VF64M8>;
}
-defset list<VTypeInfoToWide> AllWidenableBFloatToFloatVectors = {
+defset list<VTypeInfoToWide> AllWidenableBF16ToFloatVectors = {
def : VTypeInfoToWide<VBF16MF4, VF32MF2>;
def : VTypeInfoToWide<VBF16MF2, VF32M1>;
def : VTypeInfoToWide<VBF16M1, VF32M2>;
@@ -554,7 +554,8 @@ defset list<VTypeInfoToWide> AllWidenableBFloatToFloatVectors = {
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVInstrInfo.h.
-class RISCVVPseudo<dag outs, dag ins, list<dag> pattern = [], string opcodestr = "", string argstr = "">
+class RISCVVPseudo<dag outs, dag ins, list<dag> pattern = [],
+ string opcodestr = "", string argstr = "">
: Pseudo<outs, ins, pattern, opcodestr, argstr> {
Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
Instruction BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
@@ -1010,8 +1011,7 @@ class VPseudoNullaryNoMask<VReg RegClass> :
class VPseudoNullaryMask<VReg RegClass> :
RISCVVPseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
(ins GetVRegNoV0<RegClass>.R:$passthru,
- VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy),
- []> {
+ VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1190,8 +1190,7 @@ class VPseudoBinaryNoMask<VReg RetClass,
bits<2> TargetConstraintType = 1,
DAGOperand sewop = sew> :
RISCVVPseudo<(outs RetClass:$rd),
- (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sewop:$sew),
- []> {
+ (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sewop:$sew)> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1227,8 +1226,7 @@ class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
bits<2> TargetConstraintType = 1> :
RISCVVPseudo<(outs RetClass:$rd),
(ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1,
- vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy),
- []> {
+ vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy)> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1320,7 +1318,7 @@ class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered>:
RISCVVPseudo<(outs),
(ins StClass:$rd, GPRMemZeroOffset:$rs1, IdxClass:$rs2,
- AVL:$vl, sew:$sew),[]>,
+ AVL:$vl, sew:$sew)>,
RISCVVSX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
@@ -1333,7 +1331,7 @@ class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered>:
RISCVVPseudo<(outs),
(ins StClass:$rd, GPRMemZeroOffset:$rs1, IdxClass:$rs2,
- VMaskOp:$vm, AVL:$vl, sew:$sew),[]>,
+ VMaskOp:$vm, AVL:$vl, sew:$sew)>,
RISCVVSX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
@@ -1351,8 +1349,7 @@ class VPseudoBinaryMaskPolicy<VReg RetClass,
RISCVVPseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
- VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy),
- []> {
+ VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1371,8 +1368,7 @@ class VPseudoTernaryMaskPolicy<VReg RetClass,
RISCVVPseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
- VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy),
- []> {
+ VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1414,8 +1410,7 @@ class VPseudoBinaryMOutMask<VReg RetClass,
RISCVVPseudo<(outs RetClass:$rd),
(ins RetClass:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
- VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy),
- []> {
+ VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1438,8 +1433,7 @@ class VPseudoTiedBinaryMask<VReg RetClass,
RISCVVPseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$passthru,
Op2Class:$rs1,
- VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy),
- []> {
+ VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1546,8 +1540,7 @@ class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass,
bits<2> TargetConstraintType = 1> :
RISCVVPseudo<(outs RetClass:$rd),
(ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
- vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy),
- []> {
+ vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy)> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1716,8 +1709,8 @@ class VPseudoUSSegStoreNoMask<VReg ValClass,
int EEW,
bits<4> NF> :
RISCVVPseudo<(outs),
- (ins ValClass:$rd, GPRMemZeroOffset:$rs1, AVL:$vl, sew:$sew),
- []>,
+ (ins ValClass:$rd, GPRMemZeroOffset:$rs1, AVL:$vl,
+ sew:$sew)>,
RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
let mayLoad = 0;
let mayStore = 1;
@@ -5870,7 +5863,7 @@ multiclass VPatConversionWF_VF<string intrinsic, string instruction,
multiclass VPatConversionWF_VF_BF<string intrinsic, string instruction,
bit isSEWAware = 0> {
- foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in
{
defvar fvti = fvtiToFWti.Vti;
defvar fwti = fvtiToFWti.Wti;
@@ -5977,7 +5970,7 @@ multiclass VPatConversionVF_WF_RTZ<string intrinsic, string instruction,
multiclass VPatConversionVF_WF_BF_RM<string intrinsic, string instruction,
bit isSEWAware = 0> {
- foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar fwti = fvtiToFWti.Wti;
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
@@ -6029,9 +6022,9 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVLENB.Encoding, X0)>,
Sched<[WriteRdVLENB]>;
let Defs = [VL, VTYPE] in {
- def PseudoReadVLENBViaVSETVLIX0 : Pseudo<(outs GPRNoX0:$rd), (ins uimm5:$shamt),
- []>,
- Sched<[WriteVSETVLI, ReadVSETVLI]>;
+ def PseudoReadVLENBViaVSETVLIX0 : Pseudo<(outs GPRNoX0:$rd),
+ (ins uimm5:$shamt), []>,
+ Sched<[WriteVSETVLI, ReadVSETVLI]>;
}
}
@@ -6694,14 +6687,14 @@ defm PseudoVID : VPseudoVID_V;
let Predicates = [HasVInstructions] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
let HasSEWOp = 1, BaseInstr = VMV_X_S in
- def PseudoVMV_X_S:
+ def PseudoVMV_X_S :
RISCVVPseudo<(outs GPR:$rd), (ins VR:$rs2, sew:$sew)>,
Sched<[WriteVMovXS, ReadVMovXS]>;
let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X, isReMaterializable = 1,
Constraints = "$rd = $passthru" in
- def PseudoVMV_S_X: RISCVVPseudo<(outs VR:$rd),
- (ins VR:$passthru, GPR:$rs1, AVL:$vl, sew:$sew),
- []>,
+ def PseudoVMV_S_X :
+ RISCVVPseudo<(outs VR:$rd),
+ (ins VR:$passthru, GPR:$rs1, AVL:$vl, sew:$sew)>,
Sched<[WriteVMovSX, ReadVMovSX_V, ReadVMovSX_X]>;
}
} // Predicates = [HasVInstructions]
@@ -6721,8 +6714,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
Constraints = "$rd = $passthru" in
def "PseudoVFMV_S_" # f.FX :
RISCVVPseudo<(outs VR:$rd),
- (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew),
- []>,
+ (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew)>,
Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>;
}
}
@@ -7154,7 +7146,7 @@ defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
// We can use vmerge.vvm to support vector-vector vfmerge.
// NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses
// int_riscv_vmerge. Support both for compatibility.
-foreach vti = AllFloatAndBFloatVectors in {
+foreach vti = AllFloatAndBF16Vectors in {
let Predicates = GetVTypeMinimalPredicates<vti>.Predicates in
defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
vti.Vector,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index dc61361..139ff92 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1388,7 +1388,7 @@ defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;
// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
-foreach fvti = AllFloatAndBFloatVectors in {
+foreach fvti = AllFloatAndBF16Vectors in {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
let Predicates = GetVTypePredicates<ivti>.Predicates in {
def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 1511f1b..cf904ea 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2426,7 +2426,7 @@ foreach vti = AllFloatVectors in {
// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
-foreach fvti = AllFloatAndBFloatVectors in {
+foreach fvti = AllFloatAndBF16Vectors in {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
let Predicates = GetVTypePredicates<ivti>.Predicates in {
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
@@ -2770,7 +2770,7 @@ foreach vti = NoGroupFloatVectors in {
}
}
-foreach vti = AllFloatAndBFloatVectors in {
+foreach vti = AllFloatAndBF16Vectors in {
defvar ivti = GetIntVTypeInfo<vti>.Vti;
let Predicates = GetVTypePredicates<ivti>.Predicates in {
def : Pat<(vti.Vector
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
index 9835c03..b683e89 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
@@ -560,7 +560,7 @@ multiclass VPseudoVNCVT_BF16_S {
}
multiclass VPatConversionS_BF16<string intrinsic, string instruction> {
- foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar fwti = fvtiToFWti.Wti;
let Predicates = [HasVendorXAndesVBFHCvt] in
@@ -572,7 +572,7 @@ multiclass VPatConversionS_BF16<string intrinsic, string instruction> {
}
multiclass VPatConversionBF16_S<string intrinsic, string instruction> {
- foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar fwti = fvtiToFWti.Wti;
let Predicates = [HasVendorXAndesVBFHCvt] in
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index b546339..557d873 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -770,7 +770,7 @@ multiclass VPatVQMACCQOQ<string intrinsic, string instruction, string kind>
: VPatVMACC<intrinsic, instruction, kind, VQMACCQOQInfoPairs, vint8m1_t>;
multiclass VPatVFWMACC<string intrinsic, string instruction, string kind>
- : VPatVMACC<intrinsic, instruction, kind, AllWidenableBFloatToFloatVectors,
+ : VPatVMACC<intrinsic, instruction, kind, AllWidenableBF16ToFloatVectors,
vbfloat16m1_t>;
defset list<VTypeInfoToWide> VFNRCLIPInfoPairs = {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index ff4a040..efdbd12 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -524,7 +524,7 @@ class QCIRVInstRI<bits<1> funct1, DAGOperand InTyImm11,
let Inst{30-20} = imm11;
}
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCommutable = 1 in
class QCISELECTIICC<bits<3> funct3, string opcodestr>
: RVInstR4<0b00, funct3, OPC_CUSTOM_2, (outs GPRNoX0:$rd_wb),
(ins GPRNoX0:$rd, GPRNoX0:$rs1, simm5:$simm1, simm5:$simm2),
@@ -537,7 +537,7 @@ class QCISELECTIICC<bits<3> funct3, string opcodestr>
let rs2 = simm1;
}
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCommutable = 1 in
class QCISELECTICC<bits<3> funct3, string opcodestr>
: RVInstR4<0b01, funct3, OPC_CUSTOM_2, (outs GPRNoX0:$rd_wb),
(ins GPRNoX0:$rd, GPRNoX0:$rs1, GPRNoX0:$rs2, simm5:$simm2),
@@ -1350,6 +1350,10 @@ class QCIMVCCIPat<CondCode Cond, QCIMVCCI Inst, DAGOperand InTyImm>
: Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), InTyImm:$imm, Cond, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))),
(Inst GPRNoX0:$rd, GPRNoX0:$rs1, InTyImm:$imm, GPRNoX0:$rs3)>;
+class QCIMVCCIZeroPat<CondCode Cond, QCIMVCCI Inst>
+ : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), Cond, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))),
+ (Inst GPRNoX0:$rd, GPRNoX0:$rs1, 0, GPRNoX0:$rs3)>;
+
class QCISELECTCCIPat<CondCode Cond, QCISELECTCCI Inst>
: Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rd), simm5:$imm, Cond, (i32 GPRNoX0:$rs2), (i32 GPRNoX0:$rs3))),
(Inst GPRNoX0:$rd, simm5:$imm, GPRNoX0:$rs2, GPRNoX0:$rs3)>;
@@ -1538,14 +1542,7 @@ def: Pat<(i32 (ctlz (not (i32 GPR:$rs1)))), (QC_CLO GPR:$rs1)>;
let Predicates = [HasVendorXqciint, IsRV32] in
def : Pat<(riscv_mileaveret_glue), (QC_C_MILEAVERET)>;
-let Predicates = [HasVendorXqcicm, IsRV32] in {
-// (SELECT X, Y, Z) is canonicalised to `(riscv_selectcc x, 0, NE, y, z)`.
-// This exists to prioritise over the `Select_GPR_Using_CC_GPR` pattern.
-def : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), SETNE, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))),
- (QC_MVNEI GPRNoX0:$rd, GPRNoX0:$rs1, 0, GPRNoX0:$rs3)>;
-def : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), SETEQ, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))),
- (QC_MVEQI GPRNoX0:$rd, GPRNoX0:$rs1, 0, GPRNoX0:$rs3)>;
-
+let Predicates = [HasVendorXqcicm, NoShortForwardBranchOpt, IsRV32] in {
def : QCIMVCCPat<SETEQ, QC_MVEQ>;
def : QCIMVCCPat<SETNE, QC_MVNE>;
def : QCIMVCCPat<SETLT, QC_MVLT>;
@@ -1553,12 +1550,24 @@ def : QCIMVCCPat<SETULT, QC_MVLTU>;
def : QCIMVCCPat<SETGE, QC_MVGE>;
def : QCIMVCCPat<SETUGE, QC_MVGEU>;
-def : QCIMVCCIPat<SETEQ, QC_MVEQI, simm5>;
-def : QCIMVCCIPat<SETNE, QC_MVNEI, simm5>;
-def : QCIMVCCIPat<SETLT, QC_MVLTI, simm5>;
-def : QCIMVCCIPat<SETULT, QC_MVLTUI, uimm5>;
-def : QCIMVCCIPat<SETGE, QC_MVGEI, simm5>;
-def : QCIMVCCIPat<SETUGE, QC_MVGEUI, uimm5>;
+// These exist to prioritise over the `Select_GPR_Using_CC_GPR` pattern for X0.
+def : QCIMVCCIZeroPat<SETEQ, QC_MVEQI>;
+def : QCIMVCCIZeroPat<SETNE, QC_MVNEI>;
+def : QCIMVCCIZeroPat<SETLT, QC_MVLTI>;
+def : QCIMVCCIZeroPat<SETULT, QC_MVLTUI>;
+def : QCIMVCCIZeroPat<SETGE, QC_MVGEI>;
+def : QCIMVCCIZeroPat<SETUGE, QC_MVGEUI>;
+}
+
+let Predicates = [HasVendorXqcicm, IsRV32] in {
+// These all use *imm5nonzero because we want to use PseudoCCMOVGPR with X0
+// when SFB is enabled. When SFB is not enabled, the `QCIMVCCIZeroPat`s above
+// will be used if RHS=0.
+def : QCIMVCCIPat<SETEQ, QC_MVEQI, simm5nonzero>;
+def : QCIMVCCIPat<SETNE, QC_MVNEI, simm5nonzero>;
+def : QCIMVCCIPat<SETLT, QC_MVLTI, simm5nonzero>;
+def : QCIMVCCIPat<SETULT, QC_MVLTUI, uimm5nonzero>;
+def : QCIMVCCIPat<SETGE, QC_MVGEI, simm5nonzero>;
+def : QCIMVCCIPat<SETUGE, QC_MVGEUI, uimm5nonzero>;
}
let Predicates = [HasVendorXqcicli, IsRV32] in {
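
For reference, riscv_selectcc, which all of these patterns match, evaluates a comparison and picks one of two values, and a plain SELECT is canonicalized to a compare against zero; that is why the zero-immediate patterns above must win over `Select_GPR_Using_CC_GPR`. An illustrative model (SETEQ case only):

    #include <cstdint>

    // riscv_selectcc(lhs, rhs, SETEQ, tv, fv); SELECT(c, t, f) is
    // canonicalized to riscv_selectcc(c, 0, SETNE, t, f).
    uint32_t selectccEq(uint32_t Lhs, uint32_t Rhs, uint32_t Tv, uint32_t Fv) {
      return Lhs == Rhs ? Tv : Fv;
    }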
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
index 5e013b4..1674c95 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
@@ -63,13 +63,14 @@ defm SD : SRL_r_aq_rl<0b011, "sd">;
//===----------------------------------------------------------------------===//
class PatLAQ<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
- : Pat<(vt (OpNode (vt GPRMemZeroOffset:$rs1))), (Inst GPRMemZeroOffset:$rs1)>;
+ : Pat<(vt (OpNode (XLenVT GPRMemZeroOffset:$rs1))),
+ (Inst GPRMemZeroOffset:$rs1)>;
// n.b. this switches order of arguments
// to deal with the fact that SRL has addr, data
// while atomic_store has data, addr
class PatSRL<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
- : Pat<(OpNode (vt GPR:$rs2), (vt GPRMemZeroOffset:$rs1)),
+ : Pat<(OpNode (vt GPR:$rs2), (XLenVT GPRMemZeroOffset:$rs1)),
(Inst GPRMemZeroOffset:$rs1, GPR:$rs2)>;
@@ -97,16 +98,15 @@ let Predicates = [HasStdExtZalasr] in {
let Predicates = [HasStdExtZalasr, IsRV32] in {
def : PatLAQ<acquiring_load<atomic_load_nonext_32>, LW_AQ>;
def : PatLAQ<seq_cst_load<atomic_load_nonext_32>, LW_AQ>;
-
-} // Predicates = [HasStdExtZalasr, IsRV64]
+} // Predicates = [HasStdExtZalasr, IsRV32]
let Predicates = [HasStdExtZalasr, IsRV64] in {
- def : PatLAQ<acquiring_load<atomic_load_asext_32>, LW_AQ>;
- def : PatLAQ<seq_cst_load<atomic_load_asext_32>, LW_AQ>;
+ def : PatLAQ<acquiring_load<atomic_load_asext_32>, LW_AQ, i64>;
+ def : PatLAQ<seq_cst_load<atomic_load_asext_32>, LW_AQ, i64>;
- def : PatLAQ<acquiring_load<atomic_load_nonext_64>, LD_AQ>;
- def : PatLAQ<seq_cst_load<atomic_load_nonext_64>, LD_AQ>;
+ def : PatLAQ<acquiring_load<atomic_load_nonext_64>, LD_AQ, i64>;
+ def : PatLAQ<seq_cst_load<atomic_load_nonext_64>, LD_AQ, i64>;
- def : PatSRL<releasing_store<atomic_store_64>, SD_RL>;
- def : PatSRL<seq_cst_store<atomic_store_64>, SD_RL>;
+ def : PatSRL<releasing_store<atomic_store_64>, SD_RL, i64>;
+ def : PatSRL<seq_cst_store<atomic_store_64>, SD_RL, i64>;
} // Predicates = [HasStdExtZalasr, IsRV64]
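
At the source level these patterns serve ordinary acquire/release atomics. A small example of code whose accesses can lower to the LW_AQ/SD_RL forms matched above on RV64 with Zalasr (a sketch; the exact mapping goes through AtomicExpand and the ordering annotations):

    #include <atomic>
    #include <cstdint>

    // Acquire load of a 32-bit atomic: a candidate for lw.aq, matching the
    // sign-extending i64 PatLAQ instantiation above.
    int32_t loadAcquire(const std::atomic<int32_t> &A) {
      return A.load(std::memory_order_acquire);
    }

    // Release store of a 64-bit atomic: a candidate for sd.rl.
    void storeRelease(std::atomic<int64_t> &A, int64_t V) {
      A.store(V, std::memory_order_release);
    }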
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
index 6d8672b..0be9eab 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
@@ -53,7 +53,7 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in {
defm : VPatConversionVF_WF_BF_RM<"int_riscv_vfncvtbf16_f_f_w",
"PseudoVFNCVTBF16_F_F", isSEWAware=1>;
- foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
+ foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar fwti = fvtiToFWti.Wti;
let Predicates = [HasVInstructionsBF16Minimal] in
@@ -91,9 +91,9 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in {
let Predicates = [HasStdExtZvfbfwma] in {
defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmaccbf16", "PseudoVFWMACCBF16",
- AllWidenableBFloatToFloatVectors, isSEWAware=1>;
+ AllWidenableBF16ToFloatVectors, isSEWAware=1>;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACCBF16",
- AllWidenableBFloatToFloatVectors>;
+ AllWidenableBF16ToFloatVectors>;
defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACCBF16",
- AllWidenableBFloatToFloatVectors>;
+ AllWidenableBF16ToFloatVectors>;
}
diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td
index 95f8a87..17a7948 100644
--- a/llvm/lib/Target/RISCV/RISCVProcessors.td
+++ b/llvm/lib/Target/RISCV/RISCVProcessors.td
@@ -347,16 +347,58 @@ defvar SiFiveP400TuneFeatures = [TuneNoDefaultUnroll,
TunePostRAScheduler];
def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model,
- !listconcat(RVA22U64Features,
- [FeatureStdExtZifencei,
+ [Feature64Bit,
+ FeatureStdExtI,
+ FeatureStdExtM,
+ FeatureStdExtA,
+ FeatureStdExtF,
+ FeatureStdExtD,
+ FeatureStdExtC,
+ FeatureStdExtZicsr,
+ FeatureStdExtZiccif,
+ FeatureStdExtZiccrse,
+ FeatureStdExtZiccamoa,
+ FeatureStdExtZicclsm,
+ FeatureStdExtZa64rs,
+ FeatureStdExtZihpm,
+ FeatureStdExtZihintpause,
+ FeatureStdExtB,
+ FeatureStdExtZic64b,
+ FeatureStdExtZicbom,
+ FeatureStdExtZicbop,
+ FeatureStdExtZicboz,
+ FeatureStdExtZfhmin,
+ FeatureStdExtZkt,
+ FeatureStdExtZifencei,
FeatureStdExtZihintntl,
FeatureUnalignedScalarMem,
- FeatureUnalignedVectorMem]),
+ FeatureUnalignedVectorMem],
SiFiveP400TuneFeatures>;
def SIFIVE_P470 : RISCVProcessorModel<"sifive-p470", SiFiveP400Model,
- !listconcat(RVA22U64Features,
- [FeatureStdExtV,
+ [Feature64Bit,
+ FeatureStdExtI,
+ FeatureStdExtM,
+ FeatureStdExtA,
+ FeatureStdExtF,
+ FeatureStdExtD,
+ FeatureStdExtC,
+ FeatureStdExtZicsr,
+ FeatureStdExtZiccif,
+ FeatureStdExtZiccrse,
+ FeatureStdExtZiccamoa,
+ FeatureStdExtZicclsm,
+ FeatureStdExtZa64rs,
+ FeatureStdExtZihpm,
+ FeatureStdExtZihintpause,
+ FeatureStdExtB,
+ FeatureStdExtZic64b,
+ FeatureStdExtZicbom,
+ FeatureStdExtZicbop,
+ FeatureStdExtZicboz,
+ FeatureStdExtZfhmin,
+ FeatureStdExtZkt,
+ FeatureStdExtV,
FeatureStdExtZifencei,
FeatureStdExtZihintntl,
FeatureStdExtZvl128b,
@@ -368,7 +410,7 @@ def SIFIVE_P470 : RISCVProcessorModel<"sifive-p470", SiFiveP400Model,
FeatureVendorXSiFivecdiscarddlone,
FeatureVendorXSiFivecflushdlone,
FeatureUnalignedScalarMem,
- FeatureUnalignedVectorMem]),
+ FeatureUnalignedVectorMem],
!listconcat(SiFiveP400TuneFeatures,
[TuneNoSinkSplatOperands,
TuneVXRMPipelineFlush])>;
@@ -397,8 +439,29 @@ def SIFIVE_P550 : RISCVProcessorModel<"sifive-p550", SiFiveP500Model,
}
def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model,
- !listconcat(RVA22U64Features,
- [FeatureStdExtV,
+ [Feature64Bit,
+ FeatureStdExtI,
+ FeatureStdExtM,
+ FeatureStdExtA,
+ FeatureStdExtF,
+ FeatureStdExtD,
+ FeatureStdExtC,
+ FeatureStdExtZicsr,
+ FeatureStdExtZiccif,
+ FeatureStdExtZiccrse,
+ FeatureStdExtZiccamoa,
+ FeatureStdExtZicclsm,
+ FeatureStdExtZa64rs,
+ FeatureStdExtZihpm,
+ FeatureStdExtZihintpause,
+ FeatureStdExtB,
+ FeatureStdExtZic64b,
+ FeatureStdExtZicbom,
+ FeatureStdExtZicbop,
+ FeatureStdExtZicboz,
+ FeatureStdExtZfhmin,
+ FeatureStdExtZkt,
+ FeatureStdExtV,
FeatureStdExtZifencei,
FeatureStdExtZihintntl,
FeatureStdExtZvl128b,
@@ -408,7 +471,7 @@ def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model,
FeatureStdExtZvksc,
FeatureStdExtZvksg,
FeatureUnalignedScalarMem,
- FeatureUnalignedVectorMem]),
+ FeatureUnalignedVectorMem],
[TuneNoDefaultUnroll,
TuneConditionalCompressedMoveFusion,
TuneLUIADDIFusion,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index d4124ae..ee25f69 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -3139,8 +3139,8 @@ bool RISCVTTIImpl::isProfitableToSinkOperands(
bool IsVPSplat = match(Op, m_Intrinsic<Intrinsic::experimental_vp_splat>(
m_Value(), m_Value(), m_Value()));
if (!IsVPSplat &&
- !match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
- m_Undef(), m_ZeroMask())))
+ !match(Op, m_Shuffle(m_InsertElt(m_Value(), m_Value(), m_ZeroInt()),
+ m_Value(), m_ZeroMask())))
continue;
// Don't sink i1 splats.
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 273edf3..0afec42 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -752,6 +752,8 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
case TargetOpcode::G_FEXP2:
return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);
+ case TargetOpcode::G_FMODF:
+ return selectModf(ResVReg, ResType, I);
case TargetOpcode::G_FLOG:
return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
@@ -3453,9 +3455,6 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_discard: {
return selectDiscard(ResVReg, ResType, I);
}
- case Intrinsic::modf: {
- return selectModf(ResVReg, ResType, I);
- }
default: {
std::string DiagMsg;
raw_string_ostream OS(DiagMsg);
@@ -4268,6 +4267,7 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg,
PtrTyReg,
LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
GR.getPointerSize()));
+
// Assign SPIR-V type of the pointer type of the alloca variable to the
// new register.
GR.assignSPIRVTypeToVReg(PtrType, PtrTyReg, MIRBuilder.getMF());
@@ -4280,10 +4280,7 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg,
.addUse(GR.getSPIRVTypeID(PtrType))
.addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function));
Register Variable = AllocaMIB->getOperand(0).getReg();
- // Modf must have 4 operands, the first two are the 2 parts of the result,
- // the third is the operand, and the last one is the floating point value.
- assert(I.getNumOperands() == 4 &&
- "Expected 4 operands for modf instruction");
+
MachineBasicBlock &BB = *I.getParent();
// Create the OpenCLLIB::modf instruction.
auto MIB =
@@ -4293,8 +4290,8 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg,
.addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
.addImm(CL::modf)
.setMIFlags(I.getFlags())
- .add(I.getOperand(3)) // Floating point value.
- .addUse(Variable); // Pointer to integral part.
+ .add(I.getOperand(I.getNumExplicitDefs())) // Floating point value.
+ .addUse(Variable); // Pointer to integral part.
// Assign the integral part stored in the ptr to the second element of the
// result.
Register IntegralPartReg = I.getOperand(1).getReg();
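
The selected OpenCL modf follows the same contract as C++ std::modf: it returns the fractional part and writes the integral part through a pointer, which is why the selector materializes a Function-storage-class variable to receive it. The contract in plain C++:

    #include <cmath>
    #include <cstdio>

    int main() {
      double IntPart;
      // Returns the fraction and stores the integral part via the pointer.
      double Frac = std::modf(3.75, &IntPart); // Frac = 0.75, IntPart = 3.0
      std::printf("%f %f\n", Frac, IntPart);
      return 0;
    }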
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp
index aea3397..205895e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp
@@ -39,6 +39,7 @@ private:
void collectBindingInfo(Module &M);
uint32_t getAndReserveFirstUnusedBinding(uint32_t DescSet);
void replaceImplicitBindingCalls(Module &M);
+ void verifyUniqueOrderIdPerResource(SmallVectorImpl<CallInst *> &Calls);
// A map from descriptor set to a bit vector of used binding numbers.
std::vector<BitVector> UsedBindings;
@@ -94,6 +95,33 @@ void SPIRVLegalizeImplicitBinding::collectBindingInfo(Module &M) {
});
}
+void SPIRVLegalizeImplicitBinding::verifyUniqueOrderIdPerResource(
+ SmallVectorImpl<CallInst *> &Calls) {
+ // Check that calls sharing an order ID also share a descriptor set, i.e.
+ // that each order ID identifies a single resource.
+ for (uint32_t i = 1; i < Calls.size(); ++i) {
+ const uint32_t OrderIdArgIdx = 0;
+ const uint32_t DescSetArgIdx = 1;
+ const uint32_t OrderA =
+ cast<ConstantInt>(Calls[i - 1]->getArgOperand(OrderIdArgIdx))
+ ->getZExtValue();
+ const uint32_t OrderB =
+ cast<ConstantInt>(Calls[i]->getArgOperand(OrderIdArgIdx))
+ ->getZExtValue();
+ if (OrderA == OrderB) {
+ const uint32_t DescSetA =
+ cast<ConstantInt>(Calls[i - 1]->getArgOperand(DescSetArgIdx))
+ ->getZExtValue();
+ const uint32_t DescSetB =
+ cast<ConstantInt>(Calls[i]->getArgOperand(DescSetArgIdx))
+ ->getZExtValue();
+ if (DescSetA != DescSetB) {
+ report_fatal_error("Implicit binding calls with the same order ID must "
+ "have the same descriptor set");
+ }
+ }
+ }
+}
+
uint32_t SPIRVLegalizeImplicitBinding::getAndReserveFirstUnusedBinding(
uint32_t DescSet) {
if (UsedBindings.size() <= DescSet) {
@@ -112,11 +140,23 @@ uint32_t SPIRVLegalizeImplicitBinding::getAndReserveFirstUnusedBinding(
}
void SPIRVLegalizeImplicitBinding::replaceImplicitBindingCalls(Module &M) {
+ uint32_t lastOrderId = -1;
+ uint32_t lastBindingNumber = -1;
+
for (CallInst *OldCI : ImplicitBindingCalls) {
IRBuilder<> Builder(OldCI);
+ const uint32_t OrderId =
+ cast<ConstantInt>(OldCI->getArgOperand(0))->getZExtValue();
const uint32_t DescSet =
cast<ConstantInt>(OldCI->getArgOperand(1))->getZExtValue();
- const uint32_t NewBinding = getAndReserveFirstUnusedBinding(DescSet);
+
+ // Reuse an existing binding for this order ID, if one was already assigned.
+ // Otherwise, assign a new binding.
+ const uint32_t NewBinding = (lastOrderId == OrderId)
+ ? lastBindingNumber
+ : getAndReserveFirstUnusedBinding(DescSet);
+ lastOrderId = OrderId;
+ lastBindingNumber = NewBinding;
SmallVector<Value *, 8> Args;
Args.push_back(Builder.getInt32(DescSet));
@@ -142,6 +182,7 @@ bool SPIRVLegalizeImplicitBinding::runOnModule(Module &M) {
if (ImplicitBindingCalls.empty()) {
return false;
}
+ verifyUniqueOrderIdPerResource(ImplicitBindingCalls);
replaceImplicitBindingCalls(M);
return true;
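
The reuse added above relies on the call list being sorted by order ID, so calls sharing an ID are adjacent. A standalone sketch of the assignment rule (hypothetical types, and ignoring the per-descriptor-set free-binding search that getAndReserveFirstUnusedBinding performs):

    #include <cstdint>
    #include <vector>

    struct ImplicitBinding { uint32_t OrderId, DescSet; };

    // Consecutive calls with the same order ID share one binding number; a
    // new order ID reserves the next free number.
    std::vector<uint32_t>
    assignBindings(const std::vector<ImplicitBinding> &Calls) {
      std::vector<uint32_t> Bindings;
      uint32_t LastOrderId = ~0u, LastBinding = ~0u, NextFree = 0;
      for (const ImplicitBinding &C : Calls) {
        uint32_t B = (C.OrderId == LastOrderId) ? LastBinding : NextFree++;
        LastOrderId = C.OrderId;
        LastBinding = B;
        Bindings.push_back(B);
      }
      return Bindings;
    }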
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index db85e33..53074ea 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -300,6 +300,7 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
getActionDefinitionsBuilder({G_STRICT_FSQRT,
G_FPOW,
G_FEXP,
+ G_FMODF,
G_FEXP2,
G_FLOG,
G_FLOG2,
diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp
index ad7e503..cf85691 100644
--- a/llvm/lib/Target/TargetMachine.cpp
+++ b/llvm/lib/Target/TargetMachine.cpp
@@ -27,7 +27,7 @@
#include "llvm/Target/TargetLoweringObjectFile.h"
using namespace llvm;
-cl::opt<bool> NoKernelInfoEndLTO(
+cl::opt<bool> llvm::NoKernelInfoEndLTO(
"no-kernel-info-end-lto",
cl::desc("remove the kernel-info pass at the end of the full LTO pipeline"),
cl::init(false), cl::Hidden);
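
The qualification matters because the option is declared extern inside namespace llvm elsewhere; a definition at global scope would create an unrelated global and leave the declaration unresolved. The namespace-wrapping hunks below (FunctionImport, FunctionSpecialization, SampleProfile, WholeProgramDevirt) address the same declaration/definition matching. Reduced to plain C++ (illustrative names):

    namespace llvm { extern int SomeOpt; } // declaration, e.g. in a header

    // int SomeOpt = 0;     // wrong: defines ::SomeOpt, while llvm::SomeOpt
    //                      // remains undefined at link time
    int llvm::SomeOpt = 0;  // correct: defines the declared entity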
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index 2cfdc75..a068138 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -957,6 +957,8 @@ const char *VETargetLowering::getTargetNodeName(unsigned Opcode) const {
EVT VETargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
EVT VT) const {
+ if (VT.isVector())
+ return VT.changeVectorElementType(MVT::i1);
return MVT::i32;
}
diff --git a/llvm/lib/Target/X86/X86FixupSetCC.cpp b/llvm/lib/Target/X86/X86FixupSetCC.cpp
index 2de89947..ea93a57 100644
--- a/llvm/lib/Target/X86/X86FixupSetCC.cpp
+++ b/llvm/lib/Target/X86/X86FixupSetCC.cpp
@@ -136,6 +136,12 @@ bool X86FixupSetCCPass::runOnMachineFunction(MachineFunction &MF) {
.addReg(ZeroReg)
.addReg(Reg0)
.addImm(X86::sub_8bit);
+
+ // Redirect the debug-instr-number to the setcc.
+ if (unsigned InstrNum = ZExt->peekDebugInstrNum())
+ MF.makeDebugValueSubstitution({InstrNum, 0},
+ {MI.getDebugInstrNum(), 0});
+
ToErase.push_back(ZExt);
}
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index cd04ff5..3802506 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -44615,8 +44615,11 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
APInt DemandedMask = OriginalDemandedBits << ShAmt;
- // If we just want the sign bit then we don't need to shift it.
- if (OriginalDemandedBits.isSignMask())
+ // If we only want bits that already match the signbit then we don't need
+ // to shift.
+ unsigned NumHiDemandedBits = BitWidth - OriginalDemandedBits.countr_zero();
+ if (TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1) >=
+ NumHiDemandedBits)
return TLO.CombineTo(Op, Op0);
// fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
@@ -45169,6 +45172,18 @@ bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
case X86ISD::Wrapper:
case X86ISD::WrapperRIP:
return true;
+ case X86ISD::PACKSS:
+ case X86ISD::PACKUS: {
+ APInt DemandedLHS, DemandedRHS;
+ getPackDemandedElts(Op.getSimpleValueType(), DemandedElts, DemandedLHS,
+ DemandedRHS);
+ return (!DemandedLHS ||
+ DAG.isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedLHS,
+ PoisonOnly, Depth + 1)) &&
+ (!DemandedRHS ||
+ DAG.isGuaranteedNotToBeUndefOrPoison(Op.getOperand(1), DemandedRHS,
+ PoisonOnly, Depth + 1));
+ }
case X86ISD::INSERTPS:
case X86ISD::BLENDI:
case X86ISD::PSHUFB:
@@ -45239,6 +45254,10 @@ bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
case X86ISD::BLENDI:
case X86ISD::BLENDV:
return false;
+ // SSE packs.
+ case X86ISD::PACKSS:
+ case X86ISD::PACKUS:
+ return false;
// SSE target shuffles.
case X86ISD::INSERTPS:
case X86ISD::PSHUFB:
@@ -45438,7 +45457,8 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
const SDLoc &DL,
const X86Subtarget &Subtarget) {
EVT SrcVT = Src.getValueType();
- if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
+ if (Subtarget.useSoftFloat() || !SrcVT.isSimple() ||
+ SrcVT.getScalarType() != MVT::i1)
return SDValue();
// Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
@@ -52369,16 +52389,41 @@ static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
// Do not flip "e > c", where "c" is a constant, because Cmp instruction
// cannot take an immediate as its first operand.
//
- if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
- EFLAGS.getValueType().isInteger() &&
- !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
- SDValue NewSub =
- DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
- EFLAGS.getOperand(1), EFLAGS.getOperand(0));
- SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
+ // If EFLAGS is from a CMP that compares the same operands as the earlier
+ // SUB producing X (i.e. CMP X, Y), we can directly use the carry flag with
+ // SBB/ADC without creating a flipped SUB.
+ if (EFLAGS.getOpcode() == X86ISD::CMP &&
+ EFLAGS.getValueType().isInteger() && X == EFLAGS.getOperand(0)) {
return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
DAG.getVTList(VT, MVT::i32), X,
- DAG.getConstant(0, DL, VT), NewEFLAGS);
+ DAG.getConstant(0, DL, VT), EFLAGS);
+ }
+
+ if (EFLAGS.getOpcode() == X86ISD::SUB &&
+ EFLAGS.getValueType().isInteger() &&
+ !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
+ // Only create NewSub if we know one of the folds will succeed to avoid
+ // introducing a temporary node that may persist and affect one-use checks
+ // below.
+ if (EFLAGS.getNode()->hasOneUse()) {
+ SDValue NewSub = DAG.getNode(
+ X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
+ EFLAGS.getOperand(1), EFLAGS.getOperand(0));
+ SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
+ return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
+ DAG.getVTList(VT, MVT::i32), X,
+ DAG.getConstant(0, DL, VT), NewEFLAGS);
+ }
+
+ if (IsSub && X == EFLAGS.getValue(0)) {
+ SDValue NewSub = DAG.getNode(
+ X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
+ EFLAGS.getOperand(1), EFLAGS.getOperand(0));
+ SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
+ return DAG.getNode(X86ISD::SBB, DL, DAG.getVTList(VT, MVT::i32),
+ EFLAGS.getOperand(0), EFLAGS.getOperand(1),
+ NewEFLAGS);
+ }
}
}
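
A source-level shape that reaches this combine: subtracting the result of an unsigned compare lets the borrow come straight from the compare, so no separate boolean needs materializing (a sketch; actual selection varies with surrounding code):

    #include <cstdint>

    // Can lower to: cmp x, y ; sbb x, 0 (reusing CF from the compare).
    uint32_t subBorrow(uint32_t X, uint32_t Y) {
      return X - (X < Y ? 1u : 0u);
    }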
diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp
index 278ae46..0ba71ad 100644
--- a/llvm/lib/Target/X86/X86LowerAMXType.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp
@@ -854,6 +854,7 @@ public:
: Func(F), SC(ShapeC), DT(nullptr) {}
bool combineCastStore(IntrinsicInst *Cast, StoreInst *ST);
bool combineLoadCast(IntrinsicInst *Cast, LoadInst *LD);
+ bool combineTilezero(IntrinsicInst *Cast);
bool combineLdSt(SmallVectorImpl<Instruction *> &Casts);
bool combineAMXcast(TargetLibraryInfo *TLI);
bool transformAMXCast(IntrinsicInst *AMXCast);
@@ -1175,6 +1176,26 @@ bool X86LowerAMXCast::combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) {
return EraseLoad;
}
+// %19 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> zeroinitializer)
+// -->
+// %19 = tail call x86_amx @llvm.x86.tilezero.internal(i16 %row, i16 %col)
+bool X86LowerAMXCast::combineTilezero(IntrinsicInst *Cast) {
+ Value *Row = nullptr, *Col = nullptr;
+ Use &U = *(Cast->use_begin());
+ unsigned OpNo = U.getOperandNo();
+ auto *II = cast<IntrinsicInst>(U.getUser());
+ if (!isAMXIntrinsic(II))
+ return false;
+
+ std::tie(Row, Col) = SC->getShape(II, OpNo);
+
+ IRBuilder<> Builder(Cast);
+ Value *NewInst =
+ Builder.CreateIntrinsic(Intrinsic::x86_tilezero_internal, {}, {Row, Col});
+ Cast->replaceAllUsesWith(NewInst);
+ return true;
+}
+
bool X86LowerAMXCast::combineLdSt(SmallVectorImpl<Instruction *> &Casts) {
bool Change = false;
for (auto *Cast : Casts) {
@@ -1198,6 +1219,14 @@ bool X86LowerAMXCast::combineLdSt(SmallVectorImpl<Instruction *> &Casts) {
for (auto *Store : DeadStores)
Store->eraseFromParent();
} else { // x86_cast_vector_to_tile
+ // %19 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> zeroinitializer)
+ // -->
+ // %19 = tail call x86_amx @llvm.x86.tilezero.internal(i16 %row, i16 %col)
+ if (isa<ConstantAggregateZero>(Cast->getOperand(0))) {
+ Change |= combineTilezero(cast<IntrinsicInst>(Cast));
+ continue;
+ }
+
auto *Load = dyn_cast<LoadInst>(Cast->getOperand(0));
if (!Load || !Load->hasOneUse())
continue;
@@ -1210,6 +1239,7 @@ bool X86LowerAMXCast::combineLdSt(SmallVectorImpl<Instruction *> &Casts) {
// Set the operand to null so that the load instruction can be erased.
Cast->setOperand(0, nullptr);
Load->eraseFromParent();
+ Change = true;
}
}
}
diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp
index 83aa7de..28ee444 100644
--- a/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -72,6 +72,7 @@ STATISTIC(NumImportedModules, "Number of modules imported from");
STATISTIC(NumDeadSymbols, "Number of dead stripped symbols in index");
STATISTIC(NumLiveSymbols, "Number of live symbols in index");
+namespace llvm {
cl::opt<bool>
ForceImportAll("force-import-all", cl::init(false), cl::Hidden,
cl::desc("Import functions with noinline attribute"));
@@ -185,9 +186,8 @@ static cl::opt<bool> CtxprofMoveRootsToOwnModule(
extern cl::list<GlobalValue::GUID> MoveSymbolGUID;
-namespace llvm {
extern cl::opt<bool> EnableMemProfContextDisambiguation;
-}
+} // end namespace llvm
// Load lazily a module from \p FileName in \p Context.
static std::unique_ptr<Module> loadFile(const std::string &FileName,
diff --git a/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp b/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
index 4f53738..150a2dc 100644
--- a/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
@@ -28,10 +28,13 @@ using namespace llvm;
STATISTIC(NumSpecsCreated, "Number of specializations created");
+namespace llvm {
+
static cl::opt<bool> ForceSpecialization(
- "force-specialization", cl::init(false), cl::Hidden, cl::desc(
- "Force function specialization for every call site with a constant "
- "argument"));
+ "force-specialization", cl::init(false), cl::Hidden,
+ cl::desc(
+ "Force function specialization for every call site with a constant "
+ "argument"));
static cl::opt<unsigned> MaxClones(
"funcspec-max-clones", cl::init(3), cl::Hidden, cl::desc(
@@ -91,6 +94,8 @@ static cl::opt<bool> SpecializeLiteralConstant(
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
+} // end namespace llvm
+
bool InstCostVisitor::canEliminateSuccessor(BasicBlock *BB,
BasicBlock *Succ) const {
unsigned I = 0;
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index f88d51f..99c4982 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1680,7 +1680,9 @@ processGlobal(GlobalValue &GV,
/// FastCC.
static void ChangeCalleesToFastCall(Function *F) {
for (User *U : F->users())
- cast<CallBase>(U)->setCallingConv(CallingConv::Fast);
+ if (auto *Call = dyn_cast<CallBase>(U))
+ if (Call->getCalledOperand() == F)
+ Call->setCallingConv(CallingConv::Fast);
}
static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs,
@@ -1766,10 +1768,12 @@ isValidCandidateForColdCC(Function &F,
return false;
for (User *U : F.users()) {
- CallBase &CB = cast<CallBase>(*U);
- Function *CallerFunc = CB.getParent()->getParent();
+ CallBase *CB = dyn_cast<CallBase>(U);
+ if (!CB || CB->getCalledOperand() != &F)
+ continue;
+ Function *CallerFunc = CB->getParent()->getParent();
BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
- if (!isColdCallSite(CB, CallerBFI))
+ if (!isColdCallSite(*CB, CallerBFI))
return false;
if (!llvm::is_contained(AllCallsCold, CallerFunc))
return false;
@@ -1779,7 +1783,9 @@ isValidCandidateForColdCC(Function &F,
static void changeCallSitesToColdCC(Function *F) {
for (User *U : F->users())
- cast<CallBase>(U)->setCallingConv(CallingConv::Cold);
+ if (auto *Call = dyn_cast<CallBase>(U))
+ if (Call->getCalledOperand() == F)
+ Call->setCallingConv(CallingConv::Cold);
}
// This function iterates over all the call instructions in the input Function
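
The unchecked cast<CallBase>(U) was unsound: a function's users include instructions that merely take its address, and even a genuine call may pass F as an argument rather than invoke it. The guarded shape, reduced to a standalone helper (illustrative; mirrors the loops above):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    static void retagDirectCallers(Function *F, CallingConv::ID CC) {
      for (User *U : F->users())
        // dyn_cast returns null for non-call users, where cast<> would
        // assert; the called-operand check skips calls that only pass F
        // as an argument.
        if (auto *Call = dyn_cast<CallBase>(U))
          if (Call->getCalledOperand() == F)
            Call->setCallingConv(CC);
    }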
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index 15f4d76..ddb95a4 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -214,11 +214,12 @@ static cl::opt<bool> MemProfRequireDefinitionForPromotion(
"memprof-require-definition-for-promotion", cl::init(false), cl::Hidden,
cl::desc(
"Require target function definition when promoting indirect calls"));
-} // namespace llvm
extern cl::opt<bool> MemProfReportHintedSizes;
extern cl::opt<unsigned> MinClonedColdBytePercent;
+} // namespace llvm
+
namespace {
/// CRTP base for graphs built from either IR or ThinLTO summary index.
///
@@ -3980,7 +3981,6 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
void ModuleCallsiteContextGraph::updateAllocationCall(
CallInfo &Call, AllocationType AllocType) {
std::string AllocTypeString = getAllocTypeAttributeString(AllocType);
- removeAnyExistingAmbiguousAttribute(cast<CallBase>(Call.call()));
auto A = llvm::Attribute::get(Call.call()->getFunction()->getContext(),
"memprof", AllocTypeString);
cast<CallBase>(Call.call())->addFnAttr(A);
@@ -5642,7 +5642,6 @@ bool MemProfContextDisambiguation::applyImport(Module &M) {
// clone J-1 (J==0 is the original clone and does not have a VMaps
// entry).
CBClone = cast<CallBase>((*VMaps[J - 1])[CB]);
- removeAnyExistingAmbiguousAttribute(CBClone);
CBClone->addFnAttr(A);
ORE.emit(OptimizationRemark(DEBUG_TYPE, "MemprofAttribute", CBClone)
<< ore::NV("AllocationCall", CBClone) << " in clone "
diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp
index 5bc7e34..e39e311 100644
--- a/llvm/lib/Transforms/IPO/SampleProfile.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp
@@ -116,6 +116,8 @@ STATISTIC(
NumCSInlinedHitGrowthLimit,
"Number of functions with FDO inline stopped due to growth size limit");
+namespace llvm {
+
// Command line option to specify the file to read samples from. This is
// mainly used for debugging.
static cl::opt<std::string> SampleProfileFile(
@@ -198,7 +200,6 @@ static cl::opt<bool> DisableSampleLoaderInlining(
"pass, and merge (or scale) profiles (as configured by "
"--sample-profile-merge-inlinee)."));
-namespace llvm {
cl::opt<bool>
SortProfiledSCC("sort-profiled-scc-member", cl::init(true), cl::Hidden,
cl::desc("Sort profiled recursion by edge weights."));
@@ -1664,8 +1665,9 @@ void SampleProfileLoader::generateMDProfMetadata(Function &F) {
else if (OverwriteExistingWeights)
I.setMetadata(LLVMContext::MD_prof, nullptr);
} else if (!isa<IntrinsicInst>(&I)) {
- setBranchWeights(I, {static_cast<uint32_t>(BlockWeights[BB])},
- /*IsExpected=*/false);
+ setBranchWeights(
+ I, ArrayRef<uint32_t>{static_cast<uint32_t>(BlockWeights[BB])},
+ /*IsExpected=*/false);
}
}
} else if (OverwriteExistingWeights || ProfileSampleBlockAccurate) {
@@ -1676,7 +1678,8 @@ void SampleProfileLoader::generateMDProfMetadata(Function &F) {
if (cast<CallBase>(I).isIndirectCall()) {
I.setMetadata(LLVMContext::MD_prof, nullptr);
} else {
- setBranchWeights(I, {uint32_t(0)}, /*IsExpected=*/false);
+ setBranchWeights(I, ArrayRef<uint32_t>{uint32_t(0)},
+ /*IsExpected=*/false);
}
}
}
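
Spelling out ArrayRef<uint32_t> pins the overload: a bare braced list can convert to more than one container-like parameter type, at which point the call is ambiguous. A generic illustration with hypothetical overloads:

    #include <cstdint>
    #include <initializer_list>
    #include <vector>

    struct Ref { Ref(std::initializer_list<uint32_t>) {} };
    void take(Ref) {}
    void take(std::vector<uint32_t>) {}

    int main() {
      // take({1u});  // error: ambiguous between Ref and std::vector
      take(Ref{1u});  // OK: the braced list is given an explicit type
      return 0;
    }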
diff --git a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp
index 093a39e..70b8614 100644
--- a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp
@@ -23,6 +23,8 @@ using namespace sampleprof;
#define DEBUG_TYPE "sample-profile-matcher"
+namespace llvm {
+
static cl::opt<unsigned> FuncProfileSimilarityThreshold(
"func-profile-similarity-threshold", cl::Hidden, cl::init(80),
cl::desc("Consider a profile matches a function if the similarity of their "
@@ -55,6 +57,8 @@ static cl::opt<unsigned> SalvageStaleProfileMaxCallsites(
cl::desc("The maximum number of callsites in a function, above which stale "
"profile matching will be skipped."));
+} // end namespace llvm
+
void SampleProfileMatcher::findIRAnchors(const Function &F,
AnchorMap &IRAnchors) const {
// For inlined code, recover the original callsite and callee by finding the
diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index 09bffa7..ac41fdd 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -120,6 +120,8 @@ STATISTIC(NumVirtConstProp1Bit,
"Number of 1 bit virtual constant propagations");
STATISTIC(NumVirtConstProp, "Number of virtual constant propagations");
+namespace llvm {
+
static cl::opt<PassSummaryAction> ClSummaryAction(
"wholeprogramdevirt-summary-action",
cl::desc("What to do with the summary when running this pass"),
@@ -175,6 +177,8 @@ static cl::list<std::string>
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
+} // end namespace llvm
+
/// With Clang, a pure virtual class's deleting destructor is emitted as a
/// `llvm.trap` intrinsic followed by an unreachable IR instruction. In the
/// context of whole program devirtualization, the deleting destructor of a pure
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 4b7793f..9b272c4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3080,6 +3080,13 @@ InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
assert(ZextLowShlAmt->uge(HighSize) &&
ZextLowShlAmt->ule(Width - LowSize) && "Invalid concat");
+ // We cannot reuse the result if it may produce poison.
+ // Drop poison-generating flags in the expression tree.
+ // Or
+ cast<Instruction>(U)->dropPoisonGeneratingFlags();
+ // Shl
+ cast<Instruction>(X)->dropPoisonGeneratingFlags();
+
FShiftArgs = {U, U, ConstantInt::get(Or0->getType(), *ZextHighShlAmt)};
break;
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index e4cb4574..07ad65c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -5780,6 +5780,45 @@ Instruction *InstCombinerImpl::foldICmpWithMinMax(Instruction &I,
return nullptr;
}
+/// Match and fold patterns like:
+/// icmp eq/ne X, min(max(X, Lo), Hi)
+/// which represents a range check and can be represented as a ConstantRange.
+///
+/// For icmp eq, build ConstantRange [Lo, Hi + 1) and convert to:
+/// (X - Lo) u< (Hi + 1 - Lo)
+/// For icmp ne, build ConstantRange [Hi + 1, Lo) and convert to:
+/// (X - (Hi + 1)) u< (Lo - (Hi + 1))
+Instruction *InstCombinerImpl::foldICmpWithClamp(ICmpInst &I, Value *X,
+ MinMaxIntrinsic *Min) {
+ if (!I.isEquality() || !Min->hasOneUse() || !Min->isMin())
+ return nullptr;
+
+ const APInt *Lo = nullptr, *Hi = nullptr;
+ if (Min->isSigned()) {
+ if (!match(Min->getLHS(), m_OneUse(m_SMax(m_Specific(X), m_APInt(Lo)))) ||
+ !match(Min->getRHS(), m_APInt(Hi)) || !Lo->slt(*Hi))
+ return nullptr;
+ } else {
+ if (!match(Min->getLHS(), m_OneUse(m_UMax(m_Specific(X), m_APInt(Lo)))) ||
+ !match(Min->getRHS(), m_APInt(Hi)) || !Lo->ult(*Hi))
+ return nullptr;
+ }
+
+ ConstantRange CR = ConstantRange::getNonEmpty(*Lo, *Hi + 1);
+ ICmpInst::Predicate Pred;
+ APInt C, Offset;
+ if (I.getPredicate() == ICmpInst::ICMP_EQ)
+ CR.getEquivalentICmp(Pred, C, Offset);
+ else
+ CR.inverse().getEquivalentICmp(Pred, C, Offset);
+
+ if (!Offset.isZero())
+ X = Builder.CreateAdd(X, ConstantInt::get(X->getType(), Offset));
+
+ return replaceInstUsesWith(
+ I, Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), C)));
+}
+
// Canonicalize checking for a power-of-2-or-zero value:
static Instruction *foldICmpPow2Test(ICmpInst &I,
InstCombiner::BuilderTy &Builder) {
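
A worked instance of the new fold with concrete bounds (Lo = 5, Hi = 10 are example values of mine, not from the patch): the ConstantRange is [5, 11), so icmp eq X, min(max(X, 5), 10) lowers to (X - 5) u< 6. A standalone check of that equivalence:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t Lo = 5, Hi = 10;
      for (int32_t X = -1000; X <= 1000; ++X) {
        // min(max(X, Lo), Hi) is std::clamp: the icmp eq form of the pattern.
        bool Clamped = X == std::clamp(X, Lo, Hi);
        // ConstantRange [Lo, Hi + 1) lowered as one unsigned range check.
        bool Range = (uint32_t)(X - Lo) < (uint32_t)(Hi + 1 - Lo);
        assert(Clamped == Range);
      }
    }
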
@@ -7467,10 +7506,14 @@ Instruction *InstCombinerImpl::foldICmpCommutative(CmpPredicate Pred,
if (Instruction *NI = foldSelectICmp(Pred, SI, Op1, CxtI))
return NI;
- if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op0))
+ if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op0)) {
if (Instruction *Res = foldICmpWithMinMax(CxtI, MinMax, Op1, Pred))
return Res;
+ if (Instruction *Res = foldICmpWithClamp(CxtI, Op1, MinMax))
+ return Res;
+ }
+
{
Value *X;
const APInt *C;
@@ -8527,6 +8570,9 @@ static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI,
DenormalMode::getIEEE()) {
CI.replaceOperand(I, 0, X);
CI.replaceOperand(I, 1, Y);
+ I.setHasNoInfs(LHSI->hasNoInfs());
+ if (LHSI->hasNoNaNs())
+ I.setHasNoNaNs(true);
return &I;
}
break;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 4f94aa2..e01c145 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -725,6 +725,7 @@ public:
Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
Instruction *foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax,
Value *Z, CmpPredicate Pred);
+ Instruction *foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min);
Instruction *foldICmpEquality(ICmpInst &Cmp);
Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
Instruction *foldSignBitTest(ICmpInst &I);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index b6b3a95..87000a1 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2934,32 +2934,6 @@ static Instruction *foldSelectWithSRem(SelectInst &SI, InstCombinerImpl &IC,
return nullptr;
}
-static Value *foldSelectWithFrozenICmp(SelectInst &Sel, InstCombiner::BuilderTy &Builder) {
- FreezeInst *FI = dyn_cast<FreezeInst>(Sel.getCondition());
- if (!FI)
- return nullptr;
-
- Value *Cond = FI->getOperand(0);
- Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue();
-
- // select (freeze(x == y)), x, y --> y
- // select (freeze(x != y)), x, y --> x
- // The freeze should be only used by this select. Otherwise, remaining uses of
- // the freeze can observe a contradictory value.
- // c = freeze(x == y) ; Let's assume that y = poison & x = 42; c is 0 or 1
- // a = select c, x, y ;
- // f(a, c) ; f(poison, 1) cannot happen, but if a is folded
- // ; to y, this can happen.
- CmpPredicate Pred;
- if (FI->hasOneUse() &&
- match(Cond, m_c_ICmp(Pred, m_Specific(TrueVal), m_Specific(FalseVal))) &&
- (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)) {
- return Pred == ICmpInst::ICMP_EQ ? FalseVal : TrueVal;
- }
-
- return nullptr;
-}
-
/// Given that \p CondVal is known to be \p CondIsTrue, try to simplify \p SI.
static Value *simplifyNestedSelectsUsingImpliedCond(SelectInst &SI,
Value *CondVal,
@@ -4446,9 +4420,6 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
if (Instruction *PN = foldSelectToPhi(SI, DT, Builder))
return replaceInstUsesWith(SI, PN);
- if (Value *Fr = foldSelectWithFrozenICmp(SI, Builder))
- return replaceInstUsesWith(SI, Fr);
-
if (Value *V = foldRoundUpIntegerWithPow2Alignment(SI, Builder))
return replaceInstUsesWith(SI, V);
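
The helper deleted above folded select (freeze(x == y)), x, y to y. Setting aside the poison subtlety that forced the one-use restriction, the value-level identity is simply that the true arm is only taken when x == y, so y is always a correct answer; a plain-integer sketch:

    #include <cassert>

    static int selectEq(int X, int Y) { return (X == Y) ? X : Y; }

    int main() {
      // Whenever the condition selects X, X == Y holds, so the result always
      // equals Y. The poison case is what made the IR fold delicate.
      for (int X = -3; X <= 3; ++X)
        for (int Y = -3; Y <= 3; ++Y)
          assert(selectEq(X, Y) == Y);
    }
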
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 6ef3066..18a45c6 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -319,20 +319,20 @@ Instruction *InstCombinerImpl::foldBitcastExtElt(ExtractElementInst &Ext) {
return nullptr;
}
-/// Find elements of V demanded by UserInstr.
-static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
+/// Find elements of V demanded by UserInstr. Returns false if not all
+/// demanded elements could be determined.
+static bool findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr,
+ APInt &UnionUsedElts) {
unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();
- // Conservatively assume that all elements are needed.
- APInt UsedElts(APInt::getAllOnes(VWidth));
-
switch (UserInstr->getOpcode()) {
case Instruction::ExtractElement: {
ExtractElementInst *EEI = cast<ExtractElementInst>(UserInstr);
assert(EEI->getVectorOperand() == V);
ConstantInt *EEIIndexC = dyn_cast<ConstantInt>(EEI->getIndexOperand());
if (EEIIndexC && EEIIndexC->getValue().ult(VWidth)) {
- UsedElts = APInt::getOneBitSet(VWidth, EEIIndexC->getZExtValue());
+ UnionUsedElts.setBit(EEIIndexC->getZExtValue());
+ return true;
}
break;
}
@@ -341,23 +341,23 @@ static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
unsigned MaskNumElts =
cast<FixedVectorType>(UserInstr->getType())->getNumElements();
- UsedElts = APInt(VWidth, 0);
- for (unsigned i = 0; i < MaskNumElts; i++) {
- unsigned MaskVal = Shuffle->getMaskValue(i);
+ for (auto I : llvm::seq(MaskNumElts)) {
+ unsigned MaskVal = Shuffle->getMaskValue(I);
if (MaskVal == -1u || MaskVal >= 2 * VWidth)
continue;
if (Shuffle->getOperand(0) == V && (MaskVal < VWidth))
- UsedElts.setBit(MaskVal);
+ UnionUsedElts.setBit(MaskVal);
if (Shuffle->getOperand(1) == V &&
((MaskVal >= VWidth) && (MaskVal < 2 * VWidth)))
- UsedElts.setBit(MaskVal - VWidth);
+ UnionUsedElts.setBit(MaskVal - VWidth);
}
- break;
+ return true;
}
default:
break;
}
- return UsedElts;
+
+ return false;
}
/// Find union of elements of V demanded by all its users.
@@ -370,7 +370,8 @@ static APInt findDemandedEltsByAllUsers(Value *V) {
APInt UnionUsedElts(VWidth, 0);
for (const Use &U : V->uses()) {
if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
- UnionUsedElts |= findDemandedEltsBySingleUser(V, I);
+ if (!findDemandedEltsBySingleUser(V, I, UnionUsedElts))
+ return APInt::getAllOnes(VWidth);
} else {
UnionUsedElts = APInt::getAllOnes(VWidth);
break;
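
The refactor above threads one union mask through every user and gives up (all lanes demanded) the first time a user cannot be analyzed. A bitmask sketch of that accumulate-or-bail shape (types and names mine):

    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <vector>

    // A user either ORs the lanes it demands into Union and returns true, or
    // returns false when its demand cannot be determined.
    using User = std::function<bool(uint64_t &)>;

    static uint64_t demandedElts(unsigned VWidth,
                                 const std::vector<User> &Users) {
      uint64_t Union = 0;
      for (const User &U : Users)
        if (!U(Union))
          return (1ull << VWidth) - 1; // conservatively demand all lanes
      return Union;                    // (VWidth < 64 assumed in this sketch)
    }

    int main() {
      std::vector<User> Users = {
          [](uint64_t &U) { U |= 1ull << 2; return true; }, // extract lane 2
          [](uint64_t &U) { U |= 0b0011; return true; }};   // shuffle lanes 0,1
      assert(demandedElts(8, Users) == 0b0111);
    }
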
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 8fbaf68..917004c 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -132,9 +132,11 @@ STATISTIC(NumReassoc , "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
"Controls which instructions are visited");
-static cl::opt<bool>
-EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
- cl::init(true));
+namespace llvm {
+
+static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
+ cl::desc("Enable code sinking"),
+ cl::init(true));
static cl::opt<unsigned> MaxSinkNumUsers(
"instcombine-max-sink-users", cl::init(32),
@@ -156,6 +158,8 @@ extern cl::opt<bool> ProfcheckDisableMetadataFixes;
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
cl::Hidden, cl::init(true));
+} // end namespace llvm
+
std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
// Handle target specific intrinsics
@@ -5169,6 +5173,7 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
// - or: pick -1
// - select's condition: if the true value is constant, choose it by making
// the condition true.
+ // - phi: pick the common constant across operands
// - default: pick 0
//
// Note that this transform is intentionally done here rather than
@@ -5179,17 +5184,43 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
// TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
// duplicating logic for binops at least.
auto getUndefReplacement = [&](Type *Ty) {
- Value *BestValue = nullptr;
+ auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
+ // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
+ // removed.
+ Constant *BestValue = nullptr;
+ for (Value *V : PN.incoming_values()) {
+ if (match(V, m_Freeze(m_Undef())))
+ continue;
+
+ Constant *C = dyn_cast<Constant>(V);
+ if (!C)
+ return nullptr;
+
+ if (!isGuaranteedNotToBeUndefOrPoison(C))
+ return nullptr;
+
+ if (BestValue && BestValue != C)
+ return nullptr;
+
+ BestValue = C;
+ }
+ return BestValue;
+ };
+
Value *NullValue = Constant::getNullValue(Ty);
- for (const auto *U : I.users()) {
+ Value *BestValue = nullptr;
+ for (auto *U : I.users()) {
Value *V = NullValue;
if (match(U, m_Or(m_Value(), m_Value())))
V = ConstantInt::getAllOnesValue(Ty);
else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
V = ConstantInt::getTrue(Ty);
else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
- if (!isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
+ if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
V = NullValue;
+ } else if (auto *PHI = dyn_cast<PHINode>(U)) {
+ if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
+ V = MaybeV;
}
if (!BestValue)
@@ -5198,6 +5229,7 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
BestValue = NullValue;
}
assert(BestValue && "Must have at least one use");
+ assert(BestValue != &I && "Cannot replace with itself");
return BestValue;
};
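
The new pickCommonConstantFromPHI only returns a value when every non-frozen-undef incoming value is the same well-defined constant. A minimal sketch of that selection rule, with std::nullopt standing in for the freeze(undef) operand:

    #include <cassert>
    #include <optional>
    #include <vector>

    static std::optional<int>
    pickCommonConstant(const std::vector<std::optional<int>> &Incoming) {
      std::optional<int> Best;
      for (const auto &V : Incoming) {
        if (!V)
          continue; // the freeze(undef) slot imposes no constraint
        if (Best && *Best != *V)
          return std::nullopt; // two different constants: nothing to pick
        Best = V;
      }
      return Best;
    }

    int main() {
      assert(pickCommonConstant({std::nullopt, 42, 42}) == 42); // phi folds to 42
      assert(!pickCommonConstant({std::nullopt, 1, 2}));
    }
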
diff --git a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
index f451c2b..cf87e35 100644
--- a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
+++ b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -55,11 +55,11 @@ using namespace llvm;
STATISTIC(NumOfPGOICallPromotion, "Number of indirect call promotions.");
STATISTIC(NumOfPGOICallsites, "Number of indirect call candidate sites.");
+namespace llvm {
extern cl::opt<unsigned> MaxNumVTableAnnotations;
-namespace llvm {
extern cl::opt<bool> EnableVTableProfileUse;
-}
+} // namespace llvm
// Command line option to disable indirect-call promotion with the default as
// false. This is for debug purpose.
@@ -672,8 +672,8 @@ CallBase &llvm::pgo::promoteIndirectCall(CallBase &CB, Function *DirectCallee,
createBranchWeights(CB.getContext(), Count, TotalCount - Count));
if (AttachProfToDirectCall)
- setBranchWeights(NewInst, {static_cast<uint32_t>(Count)},
- /*IsExpected=*/false);
+ setFittedBranchWeights(NewInst, {Count},
+ /*IsExpected=*/false);
using namespace ore;
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index cf076b9a..eff6f0c 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1923,20 +1923,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
///
/// Shadow = ParamTLS+ArgOffset.
Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
- if (ArgOffset)
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg");
+ return IRB.CreatePtrAdd(MS.ParamTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg");
}
/// Compute the origin address for a given function argument.
Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
if (!MS.TrackOrigins)
return nullptr;
- Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
- if (ArgOffset)
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg_o");
+ return IRB.CreatePtrAdd(MS.ParamOriginTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset),
+ "_msarg_o");
}
/// Compute the shadow address for a retval.
@@ -7219,9 +7216,8 @@ struct VarArgHelperBase : public VarArgHelper {
/// Compute the shadow address for a given va_arg.
Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s");
+ return IRB.CreatePtrAdd(
+ MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s");
}
/// Compute the shadow address for a given va_arg.
@@ -7235,12 +7231,12 @@ struct VarArgHelperBase : public VarArgHelper {
/// Compute the origin address for a given va_arg.
Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
// getOriginPtrForVAArgument() is always called after
// getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
// overflow.
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o");
+ return IRB.CreatePtrAdd(MS.VAArgOriginTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset),
+ "_msarg_va_o");
}
void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
@@ -7467,10 +7463,8 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
- Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, 16)),
- MS.PtrTy);
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
const Align Alignment = Align(16);
@@ -7482,10 +7476,8 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
if (MS.TrackOrigins)
IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
Alignment, AMD64FpEndOffset);
- Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, 8)),
- MS.PtrTy);
+ Value *OverflowArgAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
Value *OverflowArgAreaPtr =
IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
@@ -7615,19 +7607,15 @@ struct VarArgAArch64Helper : public VarArgHelperBase {
// Retrieve a va_list field of 'void*' size.
Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
- Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, offset)),
- MS.PtrTy);
+ Value *SaveAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
}
// Retrieve a va_list field of 'int' size.
Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
- Value *SaveAreaPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, offset)),
- MS.PtrTy);
+ Value *SaveAreaPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
}
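
Every hunk in this file replaces a ptrtoint / add / inttoptr round trip with a single byte-offset pointer add (CreatePtrAdd, i.e. a getelementptr i8). In C terms the two address computations agree, as this small sketch checks:

    #include <cassert>
    #include <cstdint>

    int main() {
      char Buf[64];
      char *Base = Buf;
      const int Off = 16;
      char *Old = (char *)((uintptr_t)Base + Off); // cast, add, cast back
      char *New = Base + Off;                      // single pointer add
      assert(Old == New);
    }
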
diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index d9e850e..120c4f6 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -222,7 +222,6 @@ cl::opt<bool> NoPGOWarnMismatchComdatWeak(
cl::desc("The option is used to turn on/off "
"warnings about hash mismatch for comdat "
"or weak functions."));
-} // namespace llvm
// Command line option to enable/disable select instruction instrumentation.
static cl::opt<bool>
@@ -347,7 +346,6 @@ cl::list<std::string> CtxPGOSkipCallsiteInstrument(
extern cl::opt<unsigned> MaxNumVTableAnnotations;
-namespace llvm {
// Command line option to turn on CFG dot dump after profile annotation.
// Defined in Analysis/BlockFrequencyInfo.cpp: -pgo-view-counts
extern cl::opt<PGOViewCountsType> PGOViewCounts;
diff --git a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
index 343bec3..a5f417a 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
@@ -54,6 +54,8 @@ using namespace llvm;
STATISTIC(NumOfPGOMemOPOpt, "Number of memop intrinsics optimized.");
STATISTIC(NumOfPGOMemOPAnnotate, "Number of memop intrinsics annotated.");
+namespace llvm {
+
// The minimum call count to optimize memory intrinsic calls.
static cl::opt<unsigned>
MemOPCountThreshold("pgo-memop-count-threshold", cl::Hidden, cl::init(1000),
@@ -93,6 +95,8 @@ static cl::opt<unsigned>
MemOpMaxOptSize("memop-value-prof-max-opt-size", cl::Hidden, cl::init(128),
cl::desc("Optimize the memop size <= this value"));
+} // end namespace llvm
+
namespace {
static const char *getMIName(const MemIntrinsic *MI) {
diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
index a3d4e53..0534fdd 100644
--- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
+++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
@@ -21,7 +21,9 @@
using namespace llvm;
using CandidateInfo = ValueProfileCollector::CandidateInfo;
+namespace llvm {
extern cl::opt<bool> MemOPOptMemcmpBcmp;
+} // end namespace llvm
///--------------------------- MemIntrinsicPlugin ------------------------------
class MemIntrinsicPlugin : public InstVisitor<MemIntrinsicPlugin> {
diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
index 944b253..e9a3e98 100644
--- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
@@ -190,12 +190,12 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold,
std::vector<BasicBlock *> *NewBBs) {
SelectInst *SI = SIToUnfold.getInst();
PHINode *SIUse = SIToUnfold.getUse();
- BasicBlock *StartBlock = SI->getParent();
+ assert(SI->hasOneUse());
+ // The select's use may be reached from a block other than the one in
+ // which the select is defined.
+ BasicBlock *StartBlock = SIUse->getIncomingBlock(*SI->use_begin());
BranchInst *StartBlockTerm =
dyn_cast<BranchInst>(StartBlock->getTerminator());
-
assert(StartBlockTerm);
- assert(SI->hasOneUse());
if (StartBlockTerm->isUnconditional()) {
BasicBlock *EndBlock = StartBlock->getUniqueSuccessor();
@@ -332,7 +332,7 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold,
}
// Preserve loop info
- if (Loop *L = LI->getLoopFor(SI->getParent())) {
+ if (Loop *L = LI->getLoopFor(StartBlock)) {
for (BasicBlock *NewBB : *NewBBs)
L->addBasicBlockToLoop(NewBB, *LI);
}
@@ -533,6 +533,8 @@ private:
return false;
// Only fold the select coming directly from where it is defined.
+ // TODO: Selects coming indirectly are now handled as well, so this
+ // constraint can be relaxed.
PHINode *PHIUser = dyn_cast<PHINode>(SIUse);
if (PHIUser && PHIUser->getIncomingBlock(*SI->use_begin()) != SIBB)
return false;
diff --git a/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp b/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp
index 2025fbb..3c14036e 100644
--- a/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp
@@ -26,6 +26,8 @@
using namespace llvm;
+namespace llvm {
+
static cl::opt<unsigned>
JumpTableSizeThreshold("jump-table-to-switch-size-threshold", cl::Hidden,
cl::desc("Only split jump tables with size less or "
@@ -43,6 +45,8 @@ static cl::opt<unsigned> FunctionSizeThreshold(
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
+} // end namespace llvm
+
#define DEBUG_TYPE "jump-table-to-switch"
namespace {
@@ -201,14 +205,12 @@ PreservedAnalyses JumpTableToSwitchPass::run(Function &F,
PostDominatorTree *PDT = AM.getCachedResult<PostDominatorTreeAnalysis>(F);
DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
bool Changed = false;
- InstrProfSymtab Symtab;
- if (auto E = Symtab.create(*F.getParent()))
- F.getContext().emitError(
- "Could not create indirect call table, likely corrupted IR" +
- toString(std::move(E)));
- DenseMap<const Function *, GlobalValue::GUID> FToGuid;
- for (const auto &[G, FPtr] : Symtab.getIDToNameMap())
- FToGuid.insert({FPtr, G});
+ auto FuncToGuid = [&](const Function &Fct) {
+ if (Fct.getMetadata(AssignGUIDPass::GUIDMetadataName))
+ return AssignGUIDPass::getGUID(Fct);
+
+    return Function::getGUIDAssumingExternalLinkage(getIRPGOFuncName(Fct, InLTO));
+ };
for (BasicBlock &BB : make_early_inc_range(F)) {
BasicBlock *CurrentBB = &BB;
@@ -230,12 +232,8 @@ PreservedAnalyses JumpTableToSwitchPass::run(Function &F,
std::optional<JumpTableTy> JumpTable = parseJumpTable(GEP, PtrTy);
if (!JumpTable)
continue;
- SplittedOutTail = expandToSwitch(
- Call, *JumpTable, DTU, ORE, [&](const Function &Fct) {
- if (Fct.getMetadata(AssignGUIDPass::GUIDMetadataName))
- return AssignGUIDPass::getGUID(Fct);
- return FToGuid.lookup_or(&Fct, 0U);
- });
+ SplittedOutTail =
+ expandToSwitch(Call, *JumpTable, DTU, ORE, FuncToGuid);
Changed = true;
break;
}
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index bab1f2a..9655173 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -116,6 +116,8 @@ STATISTIC(NumIntAssociationsHoisted,
STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
"reassociated and hoisted out of the loop");
+namespace llvm {
+
/// Memory promotion is enabled by default.
static cl::opt<bool>
DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
@@ -154,7 +156,7 @@ static cl::opt<unsigned> IntAssociationUpperLimit(
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
-cl::opt<unsigned> llvm::SetLicmMssaOptCap(
+cl::opt<unsigned> SetLicmMssaOptCap(
"licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
"for faster compile. Caps the MemorySSA clobbering calls."));
@@ -162,7 +164,7 @@ cl::opt<unsigned> llvm::SetLicmMssaOptCap(
// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
-cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
+cl::opt<unsigned> SetLicmMssaNoAccForPromotionCap(
"licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
"effect. When MSSA in LICM is enabled, then this is the maximum "
@@ -171,6 +173,8 @@ cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
+} // end namespace llvm
+
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
const LoopSafetyInfo *SafetyInfo,
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 0874b29..019536ca 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1598,11 +1598,8 @@ bool LoopIdiomRecognize::optimizeCRCLoop(const PolynomialInfo &Info) {
// crc = (crc << 8) ^ tbl[(iv'th byte of data) ^ (top byte of crc)]
{
auto LoByte = [](IRBuilderBase &Builder, Value *Op, const Twine &Name) {
- Type *OpTy = Op->getType();
- unsigned OpBW = OpTy->getIntegerBitWidth();
- return OpBW > 8
- ? Builder.CreateAnd(Op, ConstantInt::get(OpTy, 0XFF), Name)
- : Op;
+ return Builder.CreateZExtOrTrunc(
+ Op, IntegerType::getInt8Ty(Op->getContext()), Name);
};
auto HiIdx = [LoByte, CRCBW](IRBuilderBase &Builder, Value *Op,
const Twine &Name) {
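
The rewritten LoByte helper truncates to i8 instead of masking with 0xFF; both keep exactly the low eight bits, the only difference being the result type. A quick equivalence check:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t Op = 0; Op < (1u << 20); Op += 4099) {
        uint8_t Trunc = (uint8_t)Op;  // the CreateZExtOrTrunc-to-i8 form
        uint32_t Mask = Op & 0xFF;    // the old and-with-0xFF form
        assert(Trunc == Mask);
      }
    }
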
diff --git a/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp b/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp
index 1a9e16b..d31154f 100644
--- a/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp
+++ b/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp
@@ -17,6 +17,8 @@
using namespace llvm;
+namespace llvm {
+
/// Uses the "source_filename" instead of a Module hash ID for the suffix of
/// promoted locals during LTO. NOTE: This requires that the source filename
/// has a unique name / path to avoid name collisions.
@@ -35,6 +37,8 @@ cl::list<GlobalValue::GUID> MoveSymbolGUID(
"used with the name of contextual profiling roots."),
cl::Hidden);
+} // end namespace llvm
+
FunctionImportGlobalProcessing::FunctionImportGlobalProcessing(
Module &M, const ModuleSummaryIndex &Index,
SetVector<GlobalValue *> *GlobalsToImport, bool ClearDSOLocalOnDeclarations)
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 123881e..21b2652 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -3025,6 +3025,12 @@ static void combineMetadata(Instruction *K, const Instruction *J,
// Preserve !nosanitize if both K and J have it.
K->setMetadata(Kind, JMD);
break;
+ case LLVMContext::MD_captures:
+ K->setMetadata(
+ Kind, MDNode::fromCaptureComponents(
+ K->getContext(), MDNode::toCaptureComponents(JMD) |
+ MDNode::toCaptureComponents(KMD)));
+ break;
}
}
// Set !invariant.group from J if J has it. If both instructions have it
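
When combineMetadata merges J into K, the weaker capture guarantee must survive, so the new case takes the union of both instructions' !captures components. A flag-set sketch of that union (the enum below is illustrative, not LLVM's actual CaptureComponents definition):

    #include <cassert>

    enum CaptureComponents : unsigned {
      CC_None = 0,
      CC_Address = 1u << 0,
      CC_ReadProvenance = 1u << 1,
      CC_Provenance = 1u << 2,
    };

    int main() {
      unsigned KComp = CC_Address;                     // K's annotation
      unsigned JComp = CC_Address | CC_ReadProvenance; // J's annotation
      // The merged instruction may exhibit either behavior, so keep the union.
      assert((KComp | JComp) == (CC_Address | CC_ReadProvenance));
    }
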
diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp
index 735bad1..e1dcaa85 100644
--- a/llvm/lib/Transforms/Utils/LoopPeel.cpp
+++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp
@@ -883,84 +883,6 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize,
}
}
-struct WeightInfo {
- // Weights for current iteration.
- SmallVector<uint32_t> Weights;
- // Weights to subtract after each iteration.
- const SmallVector<uint32_t> SubWeights;
-};
-
-/// Update the branch weights of an exiting block of a peeled-off loop
-/// iteration.
-/// Let F is a weight of the edge to continue (fallthrough) into the loop.
-/// Let E is a weight of the edge to an exit.
-/// F/(F+E) is a probability to go to loop and E/(F+E) is a probability to
-/// go to exit.
-/// Then, Estimated ExitCount = F / E.
-/// For I-th (counting from 0) peeled off iteration we set the weights for
-/// the peeled exit as (EC - I, 1). It gives us reasonable distribution,
-/// The probability to go to exit 1/(EC-I) increases. At the same time
-/// the estimated exit count in the remainder loop reduces by I.
-/// To avoid dealing with division rounding we can just multiple both part
-/// of weights to E and use weight as (F - I * E, E).
-static void updateBranchWeights(Instruction *Term, WeightInfo &Info) {
- setBranchWeights(*Term, Info.Weights, /*IsExpected=*/false);
- for (auto [Idx, SubWeight] : enumerate(Info.SubWeights))
- if (SubWeight != 0)
- // Don't set the probability of taking the edge from latch to loop header
- // to less than 1:1 ratio (meaning Weight should not be lower than
- // SubWeight), as this could significantly reduce the loop's hotness,
- // which would be incorrect in the case of underestimating the trip count.
- Info.Weights[Idx] =
- Info.Weights[Idx] > SubWeight
- ? std::max(Info.Weights[Idx] - SubWeight, SubWeight)
- : SubWeight;
-}
-
-/// Initialize the weights for all exiting blocks.
-static void initBranchWeights(DenseMap<Instruction *, WeightInfo> &WeightInfos,
- Loop *L) {
- SmallVector<BasicBlock *> ExitingBlocks;
- L->getExitingBlocks(ExitingBlocks);
- for (BasicBlock *ExitingBlock : ExitingBlocks) {
- Instruction *Term = ExitingBlock->getTerminator();
- SmallVector<uint32_t> Weights;
- if (!extractBranchWeights(*Term, Weights))
- continue;
-
- // See the comment on updateBranchWeights() for an explanation of what we
- // do here.
- uint32_t FallThroughWeights = 0;
- uint32_t ExitWeights = 0;
- for (auto [Succ, Weight] : zip(successors(Term), Weights)) {
- if (L->contains(Succ))
- FallThroughWeights += Weight;
- else
- ExitWeights += Weight;
- }
-
- // Don't try to update weights for degenerate case.
- if (FallThroughWeights == 0)
- continue;
-
- SmallVector<uint32_t> SubWeights;
- for (auto [Succ, Weight] : zip(successors(Term), Weights)) {
- if (!L->contains(Succ)) {
- // Exit weights stay the same.
- SubWeights.push_back(0);
- continue;
- }
-
- // Subtract exit weights on each iteration, distributed across all
- // fallthrough edges.
- double W = (double)Weight / (double)FallThroughWeights;
- SubWeights.push_back((uint32_t)(ExitWeights * W));
- }
-
- WeightInfos.insert({Term, {std::move(Weights), std::move(SubWeights)}});
- }
-}
-
/// Clones the body of the loop L, putting it between \p InsertTop and \p
/// InsertBot.
/// \param IterNumber The serial number of the iteration currently being
@@ -1332,11 +1254,6 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI,
Instruction *LatchTerm =
cast<Instruction>(cast<BasicBlock>(Latch)->getTerminator());
- // If we have branch weight information, we'll want to update it for the
- // newly created branches.
- DenseMap<Instruction *, WeightInfo> Weights;
- initBranchWeights(Weights, L);
-
// Identify what noalias metadata is inside the loop: if it is inside the
// loop, the associated metadata must be cloned for each iteration.
SmallVector<MDNode *, 6> LoopLocalNoAliasDeclScopes;
@@ -1382,11 +1299,6 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI,
assert(DT.verify(DominatorTree::VerificationLevel::Fast));
#endif
- for (auto &[Term, Info] : Weights) {
- auto *TermCopy = cast<Instruction>(VMap[Term]);
- updateBranchWeights(TermCopy, Info);
- }
-
// Remove Loop metadata from the latch branch instruction
// because it is not the Loop's latch branch anymore.
auto *LatchTermCopy = cast<Instruction>(VMap[LatchTerm]);
@@ -1426,15 +1338,38 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI,
}
}
- for (const auto &[Term, Info] : Weights) {
- setBranchWeights(*Term, Info.Weights, /*IsExpected=*/false);
- }
-
// Update Metadata for count of peeled off iterations.
unsigned AlreadyPeeled = 0;
if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
AlreadyPeeled = *Peeled;
- addStringMetadataToLoop(L, PeeledCountMetaData, AlreadyPeeled + PeelCount);
+ unsigned TotalPeeled = AlreadyPeeled + PeelCount;
+ addStringMetadataToLoop(L, PeeledCountMetaData, TotalPeeled);
+
+ // Update metadata for the estimated trip count. The original branch weight
+ // metadata is already correct for both the remaining loop and the peeled loop
+ // iterations, so do not adjust it.
+ //
+ // For example, consider what happens when peeling 2 iterations from a loop
+ // with an estimated trip count of 10 and inserting them before the remaining
+ // loop. Each of the peeled iterations and each iteration in the remaining
+ // loop still has the same probability of exiting the *entire original* loop
+ // as it did when in the original loop, and thus it should still have the same
+ // branch weights. The peeled iterations' non-zero probabilities of exiting
+ // already appropriately reduce the probability of reaching the remaining
+ // iterations just as they did in the original loop. Trying to also adjust
+ // the remaining loop's branch weights to reflect its new trip count of 8 will
+ // erroneously further reduce its block frequencies. However, in case an
+ // analysis later needs to determine the trip count of the remaining loop
+ // while examining it in isolation without considering the probability of
+ // actually reaching it, we store the new trip count as separate metadata.
+ if (auto EstimatedTripCount = getLoopEstimatedTripCount(L)) {
+ unsigned EstimatedTripCountNew = *EstimatedTripCount;
+ if (EstimatedTripCountNew < TotalPeeled)
+ EstimatedTripCountNew = 0;
+ else
+ EstimatedTripCountNew -= TotalPeeled;
+ setLoopEstimatedTripCount(L, EstimatedTripCountNew);
+ }
if (Loop *ParentLoop = L->getParentLoop())
L = ParentLoop;
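
With the per-iteration weight rewriting removed, peeling now only records the reduced trip-count estimate for the remaining loop, saturating at zero. The arithmetic, including the 10-minus-2 example from the comment above:

    #include <cassert>

    static unsigned remainingTripCount(unsigned Estimated, unsigned TotalPeeled) {
      return Estimated < TotalPeeled ? 0 : Estimated - TotalPeeled;
    }

    int main() {
      assert(remainingTripCount(10, 2) == 8); // example from the comment
      assert(remainingTripCount(1, 4) == 0);  // never goes negative
    }
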
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 216bdf4..8bba634 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -95,7 +95,9 @@ using namespace PatternMatch;
#define DEBUG_TYPE "simplifycfg"
-cl::opt<bool> llvm::RequireAndPreserveDomTree(
+namespace llvm {
+
+cl::opt<bool> RequireAndPreserveDomTree(
"simplifycfg-require-and-preserve-domtree", cl::Hidden,
cl::desc(
@@ -205,6 +207,8 @@ static cl::opt<unsigned> MaxJumpThreadingLiveBlocks(
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
+} // end namespace llvm
+
STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
STATISTIC(NumLinearMaps,
"Number of switch instructions turned into linear mapping");
@@ -955,33 +959,6 @@ static bool valuesOverlap(std::vector<ValueEqualityComparisonCase> &C1,
return false;
}
-// Set branch weights on SwitchInst. This sets the metadata if there is at
-// least one non-zero weight.
-static void setBranchWeights(SwitchInst *SI, ArrayRef<uint32_t> Weights,
- bool IsExpected) {
- // Check that there is at least one non-zero weight. Otherwise, pass
- // nullptr to setMetadata which will erase the existing metadata.
- MDNode *N = nullptr;
- if (llvm::any_of(Weights, [](uint32_t W) { return W != 0; }))
- N = MDBuilder(SI->getParent()->getContext())
- .createBranchWeights(Weights, IsExpected);
- SI->setMetadata(LLVMContext::MD_prof, N);
-}
-
-// Similar to the above, but for branch and select instructions that take
-// exactly 2 weights.
-static void setBranchWeights(Instruction *I, uint32_t TrueWeight,
- uint32_t FalseWeight, bool IsExpected) {
- assert(isa<BranchInst>(I) || isa<SelectInst>(I));
- // Check that there is at least one non-zero weight. Otherwise, pass
- // nullptr to setMetadata which will erase the existing metadata.
- MDNode *N = nullptr;
- if (TrueWeight || FalseWeight)
- N = MDBuilder(I->getParent()->getContext())
- .createBranchWeights(TrueWeight, FalseWeight, IsExpected);
- I->setMetadata(LLVMContext::MD_prof, N);
-}
-
/// If TI is known to be a terminator instruction and its block is known to
/// only have a single predecessor block, check to see if that predecessor is
/// also a value comparison with the same value, and if that comparison
@@ -1181,16 +1158,6 @@ static void getBranchWeights(Instruction *TI,
}
}
-/// Keep halving the weights until all can fit in uint32_t.
-static void fitWeights(MutableArrayRef<uint64_t> Weights) {
- uint64_t Max = *llvm::max_element(Weights);
- if (Max > UINT_MAX) {
- unsigned Offset = 32 - llvm::countl_zero(Max);
- for (uint64_t &I : Weights)
- I >>= Offset;
- }
-}
-
static void cloneInstructionsIntoPredecessorBlockAndUpdateSSAUses(
BasicBlock *BB, BasicBlock *PredBlock, ValueToValueMapTy &VMap) {
Instruction *PTI = PredBlock->getTerminator();
@@ -1446,14 +1413,9 @@ bool SimplifyCFGOpt::performValueComparisonIntoPredecessorFolding(
for (ValueEqualityComparisonCase &V : PredCases)
NewSI->addCase(V.Value, V.Dest);
- if (PredHasWeights || SuccHasWeights) {
- // Halve the weights if any of them cannot fit in an uint32_t
- fitWeights(Weights);
-
- SmallVector<uint32_t, 8> MDWeights(Weights.begin(), Weights.end());
-
- setBranchWeights(NewSI, MDWeights, /*IsExpected=*/false);
- }
+ if (PredHasWeights || SuccHasWeights)
+ setFittedBranchWeights(*NewSI, Weights, /*IsExpected=*/false,
+ /*ElideAllZero=*/true);
eraseTerminatorAndDCECond(PTI);
@@ -4053,39 +4015,34 @@ static bool performBranchToCommonDestFolding(BranchInst *BI, BranchInst *PBI,
// Try to update branch weights.
uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight;
- SmallVector<uint32_t, 2> MDWeights;
+ SmallVector<uint64_t, 2> MDWeights;
if (extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight,
SuccTrueWeight, SuccFalseWeight)) {
- SmallVector<uint64_t, 8> NewWeights;
if (PBI->getSuccessor(0) == BB) {
// PBI: br i1 %x, BB, FalseDest
// BI: br i1 %y, UniqueSucc, FalseDest
// TrueWeight is TrueWeight for PBI * TrueWeight for BI.
- NewWeights.push_back(PredTrueWeight * SuccTrueWeight);
+ MDWeights.push_back(PredTrueWeight * SuccTrueWeight);
// FalseWeight is FalseWeight for PBI * TotalWeight for BI +
// TrueWeight for PBI * FalseWeight for BI.
// We assume that total weights of a BranchInst can fit into 32 bits.
// Therefore, we will not have overflow using 64-bit arithmetic.
- NewWeights.push_back(PredFalseWeight *
- (SuccFalseWeight + SuccTrueWeight) +
- PredTrueWeight * SuccFalseWeight);
+ MDWeights.push_back(PredFalseWeight * (SuccFalseWeight + SuccTrueWeight) +
+ PredTrueWeight * SuccFalseWeight);
} else {
// PBI: br i1 %x, TrueDest, BB
// BI: br i1 %y, TrueDest, UniqueSucc
// TrueWeight is TrueWeight for PBI * TotalWeight for BI +
// FalseWeight for PBI * TrueWeight for BI.
- NewWeights.push_back(PredTrueWeight * (SuccFalseWeight + SuccTrueWeight) +
- PredFalseWeight * SuccTrueWeight);
+ MDWeights.push_back(PredTrueWeight * (SuccFalseWeight + SuccTrueWeight) +
+ PredFalseWeight * SuccTrueWeight);
// FalseWeight is FalseWeight for PBI * FalseWeight for BI.
- NewWeights.push_back(PredFalseWeight * SuccFalseWeight);
+ MDWeights.push_back(PredFalseWeight * SuccFalseWeight);
}
- // Halve the weights if any of them cannot fit in an uint32_t
- fitWeights(NewWeights);
-
- append_range(MDWeights, NewWeights);
- setBranchWeights(PBI, MDWeights[0], MDWeights[1], /*IsExpected=*/false);
+ setFittedBranchWeights(*PBI, MDWeights, /*IsExpected=*/false,
+ /*ElideAllZero=*/true);
// TODO: If BB is reachable from all paths through PredBlock, then we
// could replace PBI's branch probabilities with BI's.
@@ -4125,8 +4082,8 @@ static bool performBranchToCommonDestFolding(BranchInst *BI, BranchInst *PBI,
if (auto *SI = dyn_cast<SelectInst>(PBI->getCondition()))
if (!MDWeights.empty()) {
assert(isSelectInRoleOfConjunctionOrDisjunction(SI));
- setBranchWeights(SI, MDWeights[0], MDWeights[1],
- /*IsExpected=*/false);
+ setFittedBranchWeights(*SI, {MDWeights[0], MDWeights[1]},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
++NumFoldBranchToCommonDest;
@@ -4478,9 +4435,9 @@ static bool mergeConditionalStoreToAddress(
if (InvertQCond)
std::swap(QWeights[0], QWeights[1]);
auto CombinedWeights = getDisjunctionWeights(PWeights, QWeights);
- setBranchWeights(PostBB->getTerminator(), CombinedWeights[0],
- CombinedWeights[1],
- /*IsExpected=*/false);
+ setFittedBranchWeights(*PostBB->getTerminator(),
+ {CombinedWeights[0], CombinedWeights[1]},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
QB.SetInsertPoint(T);
@@ -4836,10 +4793,9 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI,
uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther) +
PredOther * SuccCommon,
PredOther * SuccOther};
- // Halve the weights if any of them cannot fit in an uint32_t
- fitWeights(NewWeights);
- setBranchWeights(PBI, NewWeights[0], NewWeights[1], /*IsExpected=*/false);
+ setFittedBranchWeights(*PBI, NewWeights, /*IsExpected=*/false,
+ /*ElideAllZero=*/true);
// Cond may be a select instruction with the first operand set to "true", or
// the second to "false" (see how createLogicalOp works for `and` and `or`)
if (!ProfcheckDisableMetadataFixes)
@@ -4849,8 +4805,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI,
assert(dyn_cast<SelectInst>(SI)->getCondition() == PBICond);
// The corresponding probabilities are what was referred to above as
// PredCommon and PredOther.
- setBranchWeights(SI, PredCommon, PredOther,
- /*IsExpected=*/false);
+ setFittedBranchWeights(*SI, {PredCommon, PredOther},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
}
@@ -4876,8 +4832,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI,
if (HasWeights) {
uint64_t TrueWeight = PBIOp ? PredFalseWeight : PredTrueWeight;
uint64_t FalseWeight = PBIOp ? PredTrueWeight : PredFalseWeight;
- setBranchWeights(NV, TrueWeight, FalseWeight,
- /*IsExpected=*/false);
+ setFittedBranchWeights(*NV, {TrueWeight, FalseWeight},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
}
}
@@ -4940,7 +4896,8 @@ bool SimplifyCFGOpt::simplifyTerminatorOnSelect(Instruction *OldTerm,
// Create a conditional branch sharing the condition of the select.
BranchInst *NewBI = Builder.CreateCondBr(Cond, TrueBB, FalseBB);
if (TrueWeight != FalseWeight)
- setBranchWeights(NewBI, TrueWeight, FalseWeight, /*IsExpected=*/false);
+ setBranchWeights(*NewBI, {TrueWeight, FalseWeight},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
} else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) {
// Neither of the selected blocks were successors, so this
@@ -5889,7 +5846,8 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI,
TrueWeight /= 2;
FalseWeight /= 2;
}
- setBranchWeights(NewBI, TrueWeight, FalseWeight, /*IsExpected=*/false);
+ setFittedBranchWeights(*NewBI, {TrueWeight, FalseWeight},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
}
@@ -6364,9 +6322,9 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector,
// BranchWeights. We want the probability and negative probability of
// Condition == SecondCase.
assert(BranchWeights.size() == 3);
- setBranchWeights(SI, BranchWeights[2],
- BranchWeights[0] + BranchWeights[1],
- /*IsExpected=*/false);
+ setBranchWeights(
+ *SI, {BranchWeights[2], BranchWeights[0] + BranchWeights[1]},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
}
Value *ValueCompare =
@@ -6381,9 +6339,10 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector,
size_t FirstCasePos = (Condition != nullptr);
size_t SecondCasePos = FirstCasePos + 1;
uint32_t DefaultCase = (Condition != nullptr) ? BranchWeights[0] : 0;
- setBranchWeights(SI, BranchWeights[FirstCasePos],
- DefaultCase + BranchWeights[SecondCasePos],
- /*IsExpected=*/false);
+ setBranchWeights(*SI,
+ {BranchWeights[FirstCasePos],
+ DefaultCase + BranchWeights[SecondCasePos]},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
return Ret;
}
@@ -6427,8 +6386,10 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector,
// We know there's a Default case. We base the resulting branch
// weights off its probability.
assert(BranchWeights.size() >= 2);
- setBranchWeights(SI, accumulate(drop_begin(BranchWeights), 0),
- BranchWeights[0], /*IsExpected=*/false);
+ setBranchWeights(
+ *SI,
+ {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
return Ret;
}
@@ -6451,8 +6412,10 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector,
Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult);
if (auto *SI = dyn_cast<SelectInst>(Ret); SI && HasBranchWeights) {
assert(BranchWeights.size() >= 2);
- setBranchWeights(SI, accumulate(drop_begin(BranchWeights), 0),
- BranchWeights[0], /*IsExpected=*/false);
+ setBranchWeights(
+ *SI,
+ {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
return Ret;
}
@@ -6469,8 +6432,9 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector,
Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult);
if (auto *SI = dyn_cast<SelectInst>(Ret); SI && HasBranchWeights) {
assert(BranchWeights.size() >= 2);
- setBranchWeights(SI, accumulate(drop_begin(BranchWeights), 0),
- BranchWeights[0], /*IsExpected=*/false);
+ setBranchWeights(
+ *SI, {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]},
+ /*IsExpected=*/false, /*ElideAllZero=*/true);
}
return Ret;
}
@@ -8152,8 +8116,8 @@ static bool mergeNestedCondBranch(BranchInst *BI, DomTreeUpdater *DTU) {
if (HasWeight) {
uint64_t Weights[2] = {BBTWeight * BB1FWeight + BBFWeight * BB2TWeight,
BBTWeight * BB1TWeight + BBFWeight * BB2FWeight};
- fitWeights(Weights);
- setBranchWeights(BI, Weights[0], Weights[1], /*IsExpected=*/false);
+ setFittedBranchWeights(*BI, Weights, /*IsExpected=*/false,
+ /*ElideAllZero=*/true);
}
return true;
}
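
The deleted fitWeights shifted every weight right by the same amount until the largest fit in 32 bits, preserving the ratios; setFittedBranchWeights presumably absorbs this scaling. A standalone reconstruction of the deleted logic (C++20 for std::countl_zero):

    #include <algorithm>
    #include <bit>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Mirror of the removed fitWeights(): uniformly right-shift so the
    // maximum becomes representable in uint32_t.
    static void fitWeights(std::vector<uint64_t> &Weights) {
      uint64_t Max = *std::max_element(Weights.begin(), Weights.end());
      if (Max > UINT32_MAX) {
        unsigned Offset = 32 - std::countl_zero(Max);
        for (uint64_t &W : Weights)
          W >>= Offset;
      }
    }

    int main() {
      std::vector<uint64_t> W = {1ull << 40, 1ull << 20};
      fitWeights(W); // Max is 2^40, so everything shifts right by 9
      assert(W[0] <= UINT32_MAX && W[0] == (W[1] << 20)); // ratio preserved
    }
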
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index ff35db1..7d376c3 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -293,9 +293,8 @@ void LoopVectorizeHints::getHintsFromMetadata() {
}
void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) {
- if (!Name.starts_with(Prefix()))
+ if (!Name.consume_front(Prefix()))
return;
- Name = Name.substr(Prefix().size(), StringRef::npos);
const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
if (!C)
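
consume_front merges the starts_with test and the substr into one mutating call. The same idiom with std::string_view (the "llvm.loop." prefix here is only an illustration; the hunk does not show Prefix()'s actual value):

    #include <cassert>
    #include <string_view>

    static bool consumeFront(std::string_view &S, std::string_view Prefix) {
      if (!S.starts_with(Prefix))
        return false;
      S.remove_prefix(Prefix.size());
      return true;
    }

    int main() {
      std::string_view Name = "llvm.loop.vectorize.width";
      assert(consumeFront(Name, "llvm.loop.") && Name == "vectorize.width");
    }
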
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 12fb46d..7750687 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3903,7 +3903,8 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks(
if (VF.isScalar())
continue;
- VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
+ *CM.PSE.getSE());
precomputeCosts(*Plan, VF, CostCtx);
auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
@@ -4160,7 +4161,8 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
// Add on other costs that are modelled in VPlan, but not in the legacy
// cost model.
- VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind);
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind,
+ *CM.PSE.getSE());
VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
assert(VectorRegion && "Expected to have a vector region!");
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
@@ -5699,6 +5701,20 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
Worklist.push_back(InstOp);
}
+ auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
+ // If there are direct memory op users of the newly scalarized load,
+ // their cost may have changed because there's no scalarization
+ // overhead for the operand. Update it.
+ for (User *U : LI->users()) {
+ if (!isa<LoadInst, StoreInst>(U))
+ continue;
+ if (getWideningDecision(cast<Instruction>(U), VF) != CM_Scalarize)
+ continue;
+ setWideningDecision(
+ cast<Instruction>(U), VF, CM_Scalarize,
+ getMemInstScalarizationCost(cast<Instruction>(U), VF));
+ }
+ };
for (auto *I : AddrDefs) {
if (isa<LoadInst>(I)) {
// Setting the desired widening decision should ideally be handled in
@@ -5708,21 +5724,24 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
InstWidening Decision = getWideningDecision(I, VF);
if (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
(!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) &&
- Decision == CM_Scalarize))
+ Decision == CM_Scalarize)) {
// Scalarize a widened load of address or update the cost of a scalar
// load of an address.
setWideningDecision(
I, VF, CM_Scalarize,
(VF.getKnownMinValue() *
getMemoryInstructionCost(I, ElementCount::getFixed(1))));
- else if (const auto *Group = getInterleavedAccessGroup(I)) {
+ UpdateMemOpUserCost(cast<LoadInst>(I));
+ } else if (const auto *Group = getInterleavedAccessGroup(I)) {
// Scalarize an interleave group of address loads.
for (unsigned I = 0; I < Group->getFactor(); ++I) {
- if (Instruction *Member = Group->getMember(I))
+ if (Instruction *Member = Group->getMember(I)) {
setWideningDecision(
Member, VF, CM_Scalarize,
(VF.getKnownMinValue() *
getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
+ UpdateMemOpUserCost(cast<LoadInst>(Member));
+ }
}
}
} else {
@@ -6835,7 +6854,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
ElementCount VF) const {
- VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind);
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, *PSE.getSE());
InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
// Now compute and add the VPlan-based cost.
@@ -7068,7 +7087,8 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
// simplifications not accounted for in the legacy cost model. If that's the
// case, don't trigger the assertion, as the extra simplifications may cause a
// different VF to be picked by the VPlan-based cost model.
- VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind);
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind,
+ *CM.PSE.getSE());
precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
// Verify that the VPlan-based and legacy cost models agree, except for VPlans
// with early exits and plans with additional VPlan simplifications. The
@@ -7937,6 +7957,13 @@ bool VPRecipeBuilder::getScaledReductions(
auto CollectExtInfo = [this, &Exts, &ExtOpTypes,
&ExtKinds](SmallVectorImpl<Value *> &Ops) -> bool {
for (const auto &[I, OpI] : enumerate(Ops)) {
+ auto *CI = dyn_cast<ConstantInt>(OpI);
+ if (I > 0 && CI &&
+ canConstantBeExtended(CI, ExtOpTypes[0], ExtKinds[0])) {
+ ExtOpTypes[I] = ExtOpTypes[0];
+ ExtKinds[I] = ExtKinds[0];
+ continue;
+ }
Value *ExtOp;
if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp))))
return false;
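
The new early-continue lets a constant operand inherit operand 0's extend kind when the constant is representable in the narrow source type. A standalone fits-check under that reading (the helper below is my sketch; the real canConstantBeExtended lives elsewhere in the tree):

    #include <cassert>
    #include <cstdint>

    enum class ExtKind { SExt, ZExt };

    // Can the 32-bit constant C be narrowed to NarrowBits and extended back
    // to its original value with extension kind K?
    static bool fitsAfterExtend(int32_t C, unsigned NarrowBits, ExtKind K) {
      if (K == ExtKind::ZExt)
        return (uint32_t)C < (1u << NarrowBits);
      int32_t Lo = -(1 << (NarrowBits - 1)), Hi = (1 << (NarrowBits - 1)) - 1;
      return C >= Lo && C <= Hi;
    }

    int main() {
      assert(fitsAfterExtend(100, 8, ExtKind::ZExt));  // zext i8: [0, 255]
      assert(fitsAfterExtend(-100, 8, ExtKind::SExt)); // sext i8: [-128, 127]
      assert(!fitsAfterExtend(300, 8, ExtKind::ZExt)); // needs 9 bits
    }
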
@@ -8597,7 +8624,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
// TODO: Enable following transform when the EVL-version of extended-reduction
// and mulacc-reduction are implemented.
if (!CM.foldTailWithEVL()) {
- VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
+ VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
+ *CM.PSE.getSE());
VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan,
CostCtx, Range);
}
@@ -9521,55 +9549,52 @@ static SmallVector<Instruction *> preparePlanForEpilogueVectorLoop(
VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
Header->setName("vec.epilog.vector.body");
- DenseMap<Value *, Value *> ToFrozen;
- SmallVector<Instruction *> InstsToMove;
// Ensure that the start values for all header phi recipes are updated before
// vectorizing the epilogue loop.
- for (VPRecipeBase &R : Header->phis()) {
- if (auto *IV = dyn_cast<VPCanonicalIVPHIRecipe>(&R)) {
- // When vectorizing the epilogue loop, the canonical induction start
- // value needs to be changed from zero to the value after the main
- // vector loop. Find the resume value created during execution of the main
- // VPlan. It must be the first phi in the loop preheader.
- // FIXME: Improve modeling for canonical IV start values in the epilogue
- // loop.
- using namespace llvm::PatternMatch;
- PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
- for (Value *Inc : EPResumeVal->incoming_values()) {
- if (match(Inc, m_SpecificInt(0)))
- continue;
- assert(!EPI.VectorTripCount &&
- "Must only have a single non-zero incoming value");
- EPI.VectorTripCount = Inc;
- }
- // If we didn't find a non-zero vector trip count, all incoming values
- // must be zero, which also means the vector trip count is zero. Pick the
- // first zero as vector trip count.
- // TODO: We should not choose VF * UF so the main vector loop is known to
- // be dead.
- if (!EPI.VectorTripCount) {
- assert(
- EPResumeVal->getNumIncomingValues() > 0 &&
- all_of(EPResumeVal->incoming_values(),
- [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
- "all incoming values must be 0");
- EPI.VectorTripCount = EPResumeVal->getOperand(0);
- }
- VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
- assert(all_of(IV->users(),
- [](const VPUser *U) {
- return isa<VPScalarIVStepsRecipe>(U) ||
- isa<VPDerivedIVRecipe>(U) ||
- cast<VPRecipeBase>(U)->isScalarCast() ||
- cast<VPInstruction>(U)->getOpcode() ==
- Instruction::Add;
- }) &&
- "the canonical IV should only be used by its increment or "
- "ScalarIVSteps when resetting the start value");
- IV->setOperand(0, VPV);
+ VPCanonicalIVPHIRecipe *IV = Plan.getCanonicalIV();
+ // When vectorizing the epilogue loop, the canonical induction start
+ // value needs to be changed from zero to the value after the main
+ // vector loop. Find the resume value created during execution of the main
+ // VPlan. It must be the first phi in the loop preheader.
+ // FIXME: Improve modeling for canonical IV start values in the epilogue
+ // loop.
+ using namespace llvm::PatternMatch;
+ PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
+ for (Value *Inc : EPResumeVal->incoming_values()) {
+ if (match(Inc, m_SpecificInt(0)))
continue;
- }
+ assert(!EPI.VectorTripCount &&
+ "Must only have a single non-zero incoming value");
+ EPI.VectorTripCount = Inc;
+ }
+ // If we didn't find a non-zero vector trip count, all incoming values
+ // must be zero, which also means the vector trip count is zero. Pick the
+ // first zero as vector trip count.
+ // TODO: We should not choose VF * UF so the main vector loop is known to
+ // be dead.
+ if (!EPI.VectorTripCount) {
+ assert(EPResumeVal->getNumIncomingValues() > 0 &&
+ all_of(EPResumeVal->incoming_values(),
+ [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
+ "all incoming values must be 0");
+ EPI.VectorTripCount = EPResumeVal->getOperand(0);
+ }
+ VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
+ assert(all_of(IV->users(),
+ [](const VPUser *U) {
+ return isa<VPScalarIVStepsRecipe>(U) ||
+ isa<VPDerivedIVRecipe>(U) ||
+ cast<VPRecipeBase>(U)->isScalarCast() ||
+ cast<VPInstruction>(U)->getOpcode() ==
+ Instruction::Add;
+ }) &&
+ "the canonical IV should only be used by its increment or "
+ "ScalarIVSteps when resetting the start value");
+ IV->setOperand(0, VPV);
+ DenseMap<Value *, Value *> ToFrozen;
+ SmallVector<Instruction *> InstsToMove;
+ for (VPRecipeBase &R : drop_begin(Header->phis())) {
Value *ResumeV = nullptr;
// TODO: Move setting of resume values to prepareToExecute.
if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
@@ -10054,7 +10079,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
bool ForceVectorization =
Hints.getForce() == LoopVectorizeHints::FK_Enabled;
VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
- CM.CostKind);
+ CM.CostKind, *CM.PSE.getSE());
if (!ForceVectorization &&
!isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
LVP.getPlanFor(VF.Width), SEL,
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index f77d587..fedca65 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -2241,10 +2241,9 @@ public:
/// TODO: If load combining is allowed in the IR optimizer, this analysis
/// may not be necessary.
bool isLoadCombineCandidate(ArrayRef<Value *> Stores) const;
- bool isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps,
- ArrayRef<unsigned> Order, const TargetTransformInfo &TTI,
- const DataLayout &DL, ScalarEvolution &SE,
- const int64_t Diff, StridedPtrInfo &SPtrInfo) const;
+ bool isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
+ Align Alignment, const int64_t Diff, Value *Ptr0,
+ Value *PtrN, StridedPtrInfo &SPtrInfo) const;
/// Checks if the given array of loads can be represented as a vectorized,
/// scatter or just simple gather.
@@ -6824,13 +6823,10 @@ isMaskedLoadCompress(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps,
/// 4. Any pointer operand is an instruction with the users outside of the
/// current graph (for masked gathers extra extractelement instructions
/// might be required).
-bool BoUpSLP::isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps,
- ArrayRef<unsigned> Order,
- const TargetTransformInfo &TTI,
- const DataLayout &DL, ScalarEvolution &SE,
- const int64_t Diff,
- StridedPtrInfo &SPtrInfo) const {
- const size_t Sz = VL.size();
+bool BoUpSLP::isStridedLoad(ArrayRef<Value *> PointerOps, Type *ScalarTy,
+ Align Alignment, const int64_t Diff, Value *Ptr0,
+ Value *PtrN, StridedPtrInfo &SPtrInfo) const {
+ const size_t Sz = PointerOps.size();
if (Diff % (Sz - 1) != 0)
return false;
@@ -6842,7 +6838,6 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps,
});
const uint64_t AbsoluteDiff = std::abs(Diff);
- Type *ScalarTy = VL.front()->getType();
auto *VecTy = getWidenedType(ScalarTy, Sz);
if (IsAnyPointerUsedOutGraph ||
(AbsoluteDiff > Sz &&
@@ -6853,20 +6848,9 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps,
int64_t Stride = Diff / static_cast<int64_t>(Sz - 1);
if (Diff != Stride * static_cast<int64_t>(Sz - 1))
return false;
- Align Alignment =
- cast<LoadInst>(Order.empty() ? VL.front() : VL[Order.front()])
- ->getAlign();
- if (!TTI.isLegalStridedLoadStore(VecTy, Alignment))
+ if (!TTI->isLegalStridedLoadStore(VecTy, Alignment))
return false;
- Value *Ptr0;
- Value *PtrN;
- if (Order.empty()) {
- Ptr0 = PointerOps.front();
- PtrN = PointerOps.back();
- } else {
- Ptr0 = PointerOps[Order.front()];
- PtrN = PointerOps[Order.back()];
- }
+
// Iterate through all pointers and check if all distances are
// unique multiple of Dist.
SmallSet<int64_t, 4> Dists;
@@ -6875,14 +6859,14 @@ bool BoUpSLP::isStridedLoad(ArrayRef<Value *> VL, ArrayRef<Value *> PointerOps,
if (Ptr == PtrN)
Dist = Diff;
else if (Ptr != Ptr0)
- Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, DL, SE);
+ Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE);
// If the strides are not the same or repeated, we can't
// vectorize.
if (((Dist / Stride) * Stride) != Dist || !Dists.insert(Dist).second)
break;
}
if (Dists.size() == Sz) {
- Type *StrideTy = DL.getIndexType(Ptr0->getType());
+ Type *StrideTy = DL->getIndexType(Ptr0->getType());
SPtrInfo.StrideVal = ConstantInt::get(StrideTy, Stride);
SPtrInfo.Ty = getWidenedType(ScalarTy, Sz);
return true;
@@ -6971,7 +6955,11 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
cast<Instruction>(V), UserIgnoreList);
}))
return LoadsState::CompressVectorize;
- if (isStridedLoad(VL, PointerOps, Order, *TTI, *DL, *SE, *Diff, SPtrInfo))
+ Align Alignment =
+ cast<LoadInst>(Order.empty() ? VL.front() : VL[Order.front()])
+ ->getAlign();
+ if (isStridedLoad(PointerOps, ScalarTy, Alignment, *Diff, Ptr0, PtrN,
+ SPtrInfo))
return LoadsState::StridedVectorize;
}
if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
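
The arithmetic behind isStridedLoad is compact enough to state on its own: given Sz pointers whose first and last elements are Diff elements apart, the candidate stride is Diff / (Sz - 1), and the group is strided iff every pointer's distance from Ptr0 is a distinct multiple of that stride. Below is a minimal standalone sketch of the same invariant over plain integer element offsets, standing in for the SCEV-based getPointersDiff results; isStridedOffsetSeq is an illustrative helper, not LLVM API:

    #include <cassert>
    #include <cstdint>
    #include <set>
    #include <vector>

    // Illustrative only: checks that integer element offsets (first entry is
    // the lowest address) form a strided sequence, i.e. that every offset is
    // a distinct multiple of Diff / (N - 1).
    static bool isStridedOffsetSeq(const std::vector<int64_t> &Offsets) {
      const size_t N = Offsets.size();
      if (N < 2)
        return false;
      const int64_t Diff = Offsets.back() - Offsets.front();
      if (Diff == 0 || Diff % static_cast<int64_t>(N - 1) != 0)
        return false;
      const int64_t Stride = Diff / static_cast<int64_t>(N - 1);
      std::set<int64_t> Dists;
      for (int64_t O : Offsets) {
        const int64_t Dist = O - Offsets.front();
        // Reject non-multiples of the stride and repeated distances.
        if (Dist % Stride != 0 || !Dists.insert(Dist).second)
          return false;
      }
      return true;
    }

    int main() {
      assert(isStridedOffsetSeq({0, 3, 6, 9}));  // stride 3: vectorizable
      assert(!isStridedOffsetSeq({0, 3, 5, 9})); // 5 is not a multiple of 3
      assert(!isStridedOffsetSeq({0, 3, 3, 9})); // repeated distance
      return 0;
    }
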
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 81f1956..2555ebe 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -968,24 +968,36 @@ void VPlan::execute(VPTransformState *State) {
// logic generic during VPlan execution.
State->CFG.DTU.applyUpdates(
{{DominatorTree::Delete, ScalarPh, ScalarPh->getSingleSuccessor()}});
- } else {
+ }
+ ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
+ Entry);
+ // Generate code for the VPlan in parts: the vector skeleton, the loop body,
+ // and the successor blocks, including the middle, exit and scalar preheader
+ // blocks.
+ for (VPBlockBase *Block : RPOT)
+ Block->execute(State);
+
+ // If the original loop is unreachable, delete it and all its blocks.
+ if (!ScalarPhVPBB->hasPredecessors()) {
+ // DeleteDeadBlocks will remove single-entry phis. Remove them from the exit
+ // VPIRBBs in VPlan as well, otherwise we would retain references to deleted
+ // IR instructions.
+ for (VPIRBasicBlock *EB : getExitBlocks()) {
+ for (VPRecipeBase &R : make_early_inc_range(EB->phis())) {
+ if (R.getNumOperands() == 1)
+ R.eraseFromParent();
+ }
+ }
+
Loop *OrigLoop =
State->LI->getLoopFor(getScalarHeader()->getIRBasicBlock());
- // If the original loop is unreachable, we need to delete it.
auto Blocks = OrigLoop->getBlocksVector();
Blocks.push_back(cast<VPIRBasicBlock>(ScalarPhVPBB)->getIRBasicBlock());
for (auto *BB : Blocks)
State->LI->removeBlock(BB);
+ DeleteDeadBlocks(Blocks, &State->CFG.DTU);
State->LI->erase(OrigLoop);
}
- ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
- Entry);
- // Generate code for the VPlan, in parts of the vector skeleton, loop body and
- // successor blocks including the middle, exit and scalar preheader blocks.
- for (VPBlockBase *Block : RPOT)
- Block->execute(State);
-
State->CFG.DTU.flush();
VPBasicBlock *Header = vputils::getFirstLoopHeader(*this, State->VPDT);
@@ -1741,6 +1753,16 @@ void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
}
#endif
+bool llvm::canConstantBeExtended(const ConstantInt *CI, Type *NarrowType,
+ TTI::PartialReductionExtendKind ExtKind) {
+ APInt TruncatedVal = CI->getValue().trunc(NarrowType->getScalarSizeInBits());
+ unsigned WideSize = CI->getType()->getScalarSizeInBits();
+ APInt ExtendedVal = ExtKind == TTI::PR_SignExtend
+ ? TruncatedVal.sext(WideSize)
+ : TruncatedVal.zext(WideSize);
+ return ExtendedVal == CI->getValue();
+}
+
TargetTransformInfo::OperandValueInfo
VPCostContext::getOperandInfo(VPValue *V) const {
if (!V->isLiveIn())
@@ -1750,7 +1772,8 @@ VPCostContext::getOperandInfo(VPValue *V) const {
}
InstructionCost VPCostContext::getScalarizationOverhead(
- Type *ResultTy, ArrayRef<const VPValue *> Operands, ElementCount VF) {
+ Type *ResultTy, ArrayRef<const VPValue *> Operands, ElementCount VF,
+ bool AlwaysIncludeReplicatingR) {
if (VF.isScalar())
return 0;
@@ -1770,7 +1793,11 @@ InstructionCost VPCostContext::getScalarizationOverhead(
SmallPtrSet<const VPValue *, 4> UniqueOperands;
SmallVector<Type *> Tys;
for (auto *Op : Operands) {
- if (Op->isLiveIn() || isa<VPReplicateRecipe, VPPredInstPHIRecipe>(Op) ||
+ if (Op->isLiveIn() ||
+ (!AlwaysIncludeReplicatingR &&
+ isa<VPReplicateRecipe, VPPredInstPHIRecipe>(Op)) ||
+ (isa<VPReplicateRecipe>(Op) &&
+ cast<VPReplicateRecipe>(Op)->getOpcode() == Instruction::Load) ||
!UniqueOperands.insert(Op).second)
continue;
Tys.push_back(toVectorizedTy(Types.inferScalarType(Op), VF));
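
The new canConstantBeExtended helper is a round-trip test: truncate the constant to the narrow type, re-extend it with the requested kind, and accept only if the original value is recovered. A self-contained illustration of the same check with llvm::APInt; the example values and the fitsViaExt helper are for illustration only, assuming nothing beyond LLVM's ADT headers:

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    using llvm::APInt;

    // Mirrors the round-trip check: C fits the narrow width iff
    // extend(truncate(C)) reproduces C for the chosen extension kind.
    static bool fitsViaExt(const APInt &C, unsigned NarrowBits, bool Signed) {
      APInt Truncated = C.trunc(NarrowBits);
      APInt Back = Signed ? Truncated.sext(C.getBitWidth())
                          : Truncated.zext(C.getBitWidth());
      return Back == C;
    }

    int main() {
      APInt V(32, 200);                           // 200 fits i8 zero-extended...
      assert(fitsViaExt(V, 8, /*Signed=*/false));
      assert(!fitsViaExt(V, 8, /*Signed=*/true)); // ...but trunc+sext gives -56
      APInt W(32, -5, /*isSigned=*/true);         // -5 round-trips via sext
      assert(fitsViaExt(W, 8, /*Signed=*/true));
      return 0;
    }
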
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 10d704d..c167dd7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -29,6 +29,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
@@ -2977,7 +2978,8 @@ public:
/// the expression is elevated to connect the non-expression recipe with the
/// VPExpressionRecipe itself.
class VPExpressionRecipe : public VPSingleDefRecipe {
- /// Recipes included in this VPExpressionRecipe.
+ /// Recipes included in this VPExpressionRecipe. This could contain
+ /// duplicates.
SmallVector<VPSingleDefRecipe *> ExpressionRecipes;
/// Temporary VPValues used for external operands of the expression, i.e.
@@ -3039,8 +3041,11 @@ public:
}
~VPExpressionRecipe() override {
- for (auto *R : reverse(ExpressionRecipes))
- delete R;
+ SmallPtrSet<VPSingleDefRecipe *, 4> ExpressionRecipesSeen;
+ for (auto *R : reverse(ExpressionRecipes)) {
+ if (ExpressionRecipesSeen.insert(R).second)
+ delete R;
+ }
for (VPValue *T : LiveInPlaceholders)
delete T;
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
index fe59774..1580a3b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
@@ -349,12 +349,14 @@ struct VPCostContext {
LoopVectorizationCostModel &CM;
SmallPtrSet<Instruction *, 8> SkipCostComputation;
TargetTransformInfo::TargetCostKind CostKind;
+ ScalarEvolution &SE;
VPCostContext(const TargetTransformInfo &TTI, const TargetLibraryInfo &TLI,
const VPlan &Plan, LoopVectorizationCostModel &CM,
- TargetTransformInfo::TargetCostKind CostKind)
+ TargetTransformInfo::TargetCostKind CostKind,
+ ScalarEvolution &SE)
: TTI(TTI), TLI(TLI), Types(Plan), LLVMCtx(Plan.getContext()), CM(CM),
- CostKind(CostKind) {}
+ CostKind(CostKind), SE(SE) {}
/// Return the cost for \p UI with \p VF using the legacy cost model as
/// fallback until computing the cost of all recipes migrates to VPlan.
@@ -374,10 +376,12 @@ struct VPCostContext {
/// Estimate the overhead of scalarizing a recipe with result type \p ResultTy
/// and \p Operands with \p VF. This is a convenience wrapper for the
- /// type-based getScalarizationOverhead API.
- InstructionCost getScalarizationOverhead(Type *ResultTy,
- ArrayRef<const VPValue *> Operands,
- ElementCount VF);
+ /// type-based getScalarizationOverhead API. If \p AlwaysIncludeReplicatingR
+ /// is true, always compute the cost of scalarizing replicating operands.
+ InstructionCost
+ getScalarizationOverhead(Type *ResultTy, ArrayRef<const VPValue *> Operands,
+ ElementCount VF,
+ bool AlwaysIncludeReplicatingR = false);
};
/// This class can be used to assign names to VPValues. For VPValues without
@@ -468,6 +472,10 @@ public:
};
#endif
+/// Check if a constant \p CI can be safely treated as having been extended
+/// from a narrower type with the given extension kind.
+bool canConstantBeExtended(const ConstantInt *CI, Type *NarrowType,
+ TTI::PartialReductionExtendKind ExtKind);
} // end namespace llvm
#endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 3a55710..43d61f2 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -40,6 +40,7 @@
#include <cassert>
using namespace llvm;
+using namespace llvm::VPlanPatternMatch;
using VectorParts = SmallVector<Value *, 2>;
@@ -303,7 +304,6 @@ VPPartialReductionRecipe::computeCost(ElementCount VF,
VPRecipeBase *OpR = Op->getDefiningRecipe();
// If the partial reduction is predicated, a select will be operand 0
- using namespace llvm::VPlanPatternMatch;
if (match(getOperand(1), m_Select(m_VPValue(), m_VPValue(Op), m_VPValue()))) {
OpR = Op->getDefiningRecipe();
}
@@ -340,6 +340,14 @@ VPPartialReductionRecipe::computeCost(ElementCount VF,
: Widen->getOperand(1));
ExtAType = GetExtendKind(ExtAR);
ExtBType = GetExtendKind(ExtBR);
+
+ if (!ExtBR && Widen->getOperand(1)->isLiveIn()) {
+ auto *CI = cast<ConstantInt>(Widen->getOperand(1)->getLiveInIRValue());
+ if (canConstantBeExtended(CI, InputTypeA, ExtAType)) {
+ InputTypeB = InputTypeA;
+ ExtBType = ExtAType;
+ }
+ }
};
if (isa<VPWidenCastRecipe>(OpR)) {
@@ -1955,7 +1963,6 @@ InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF,
Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
VPValue *Op0, *Op1;
- using namespace llvm::VPlanPatternMatch;
if (!ScalarCond && ScalarTy->getScalarSizeInBits() == 1 &&
(match(this, m_LogicalAnd(m_VPValue(Op0), m_VPValue(Op1))) ||
match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1))))) {
@@ -2755,10 +2762,7 @@ VPExpressionRecipe::VPExpressionRecipe(
ExpressionTypes ExpressionType,
ArrayRef<VPSingleDefRecipe *> ExpressionRecipes)
: VPSingleDefRecipe(VPDef::VPExpressionSC, {}, {}),
- ExpressionRecipes(SetVector<VPSingleDefRecipe *>(
- ExpressionRecipes.begin(), ExpressionRecipes.end())
- .takeVector()),
- ExpressionType(ExpressionType) {
+ ExpressionRecipes(ExpressionRecipes), ExpressionType(ExpressionType) {
assert(!ExpressionRecipes.empty() && "Nothing to combine?");
assert(
none_of(ExpressionRecipes,
@@ -2802,14 +2806,22 @@ VPExpressionRecipe::VPExpressionRecipe(
continue;
addOperand(Op);
LiveInPlaceholders.push_back(new VPValue());
- R->setOperand(Idx, LiveInPlaceholders.back());
}
}
+
+ // Replace each external operand with the first one created for it in
+ // LiveInPlaceholders.
+ for (auto *R : ExpressionRecipes)
+ for (auto const &[LiveIn, Tmp] : zip(operands(), LiveInPlaceholders))
+ R->replaceUsesOfWith(LiveIn, Tmp);
}
void VPExpressionRecipe::decompose() {
for (auto *R : ExpressionRecipes)
- R->insertBefore(this);
+ // Since the list could contain duplicates, make sure the recipe hasn't
+ // already been inserted.
+ if (!R->getParent())
+ R->insertBefore(this);
for (const auto &[Idx, Op] : enumerate(operands()))
LiveInPlaceholders[Idx]->replaceAllUsesWith(Op);
@@ -3098,6 +3110,62 @@ bool VPReplicateRecipe::shouldPack() const {
});
}
+/// Returns true if \p Ptr is a pointer computation for which the legacy cost
+/// model computes a SCEV expression when computing the address cost.
+static bool shouldUseAddressAccessSCEV(const VPValue *Ptr) {
+ auto *PtrR = Ptr->getDefiningRecipe();
+ if (!PtrR || !((isa<VPReplicateRecipe>(PtrR) &&
+ cast<VPReplicateRecipe>(PtrR)->getOpcode() ==
+ Instruction::GetElementPtr) ||
+ isa<VPWidenGEPRecipe>(PtrR) ||
+ match(Ptr, m_GetElementPtr(m_VPValue(), m_VPValue()))))
+ return false;
+
+ // We are looking for a GEP where all indices are either loop invariant or
+ // inductions.
+ for (VPValue *Opd : drop_begin(PtrR->operands())) {
+ if (!Opd->isDefinedOutsideLoopRegions() &&
+ !isa<VPScalarIVStepsRecipe, VPWidenIntOrFpInductionRecipe>(Opd))
+ return false;
+ }
+
+ return true;
+}
+
+/// Returns true if \p V is used as part of the address of another load or
+/// store.
+static bool isUsedByLoadStoreAddress(const VPUser *V) {
+ SmallPtrSet<const VPUser *, 4> Seen;
+ SmallVector<const VPUser *> WorkList = {V};
+
+ while (!WorkList.empty()) {
+ auto *Cur = dyn_cast<VPSingleDefRecipe>(WorkList.pop_back_val());
+ if (!Cur || !Seen.insert(Cur).second)
+ continue;
+
+ for (VPUser *U : Cur->users()) {
+ if (auto *InterleaveR = dyn_cast<VPInterleaveBase>(U))
+ if (InterleaveR->getAddr() == Cur)
+ return true;
+ if (auto *RepR = dyn_cast<VPReplicateRecipe>(U)) {
+ if (RepR->getOpcode() == Instruction::Load &&
+ RepR->getOperand(0) == Cur)
+ return true;
+ if (RepR->getOpcode() == Instruction::Store &&
+ RepR->getOperand(1) == Cur)
+ return true;
+ }
+ if (auto *MemR = dyn_cast<VPWidenMemoryRecipe>(U)) {
+ if (MemR->getAddr() == Cur && MemR->isConsecutive())
+ return true;
+ }
+ }
+
+ append_range(WorkList, cast<VPSingleDefRecipe>(Cur)->users());
+ }
+ return false;
+}
+
InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
Instruction *UI = cast<Instruction>(getUnderlyingValue());
@@ -3205,21 +3273,58 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
}
case Instruction::Load:
case Instruction::Store: {
- if (isSingleScalar()) {
- bool IsLoad = UI->getOpcode() == Instruction::Load;
- Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
- Type *ScalarPtrTy = Ctx.Types.inferScalarType(getOperand(IsLoad ? 0 : 1));
- const Align Alignment = getLoadStoreAlignment(UI);
- unsigned AS = getLoadStoreAddressSpace(UI);
- TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0));
- InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost(
- UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo, UI);
- return ScalarMemOpCost + Ctx.TTI.getAddressComputationCost(
- ScalarPtrTy, nullptr, nullptr, Ctx.CostKind);
- }
+ if (VF.isScalable() && !isSingleScalar())
+ return InstructionCost::getInvalid();
+
// TODO: See getMemInstScalarizationCost for how to handle replicating and
// predicated cases.
- break;
+ const VPRegionBlock *ParentRegion = getParent()->getParent();
+ if (ParentRegion && ParentRegion->isReplicator())
+ break;
+
+ bool IsLoad = UI->getOpcode() == Instruction::Load;
+ const VPValue *PtrOp = getOperand(!IsLoad);
+ // TODO: Handle cases where we need to pass a SCEV to
+ // getAddressComputationCost.
+ if (shouldUseAddressAccessSCEV(PtrOp))
+ break;
+
+ Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
+ Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp);
+ const Align Alignment = getLoadStoreAlignment(UI);
+ unsigned AS = getLoadStoreAddressSpace(UI);
+ TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0));
+ InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost(
+ UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo);
+
+ Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF);
+
+ InstructionCost ScalarCost =
+ ScalarMemOpCost + Ctx.TTI.getAddressComputationCost(
+ PtrTy, &Ctx.SE, nullptr, Ctx.CostKind);
+ if (isSingleScalar())
+ return ScalarCost;
+
+ SmallVector<const VPValue *> OpsToScalarize;
+ Type *ResultTy = Type::getVoidTy(PtrTy->getContext());
+ // Set ResultTy and OpsToScalarize if scalarization is needed. The
+ // scalarization overhead is currently skipped only when the target does not
+ // prefer vectorized addressing and the loaded value is used as part of the
+ // address of another load or store.
+ bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing();
+ if (PreferVectorizedAddressing || !isUsedByLoadStoreAddress(this)) {
+ bool EfficientVectorLoadStore =
+ Ctx.TTI.supportsEfficientVectorElementLoadStore();
+ if (!(IsLoad && !PreferVectorizedAddressing) &&
+ !(!IsLoad && EfficientVectorLoadStore))
+ append_range(OpsToScalarize, operands());
+
+ if (!EfficientVectorLoadStore)
+ ResultTy = Ctx.Types.inferScalarType(this);
+ }
+
+ return (ScalarCost * VF.getFixedValue()) +
+ Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true);
}
}
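
To make the final return concrete: each of the VF lanes pays the scalar memory-op cost plus the address-computation cost, and the scalarization overhead is added on top when it is not skipped. With hypothetical unit costs (purely illustrative, not from any target model) of 1 for the scalar load, 1 for the address computation, VF = 4, and an overhead of 3 for packing the loaded scalars into a vector, the recipe would cost (1 + 1) * 4 + 3 = 11.
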
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index a73b083..f76777b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -40,7 +40,7 @@
using namespace llvm;
using namespace VPlanPatternMatch;
-cl::opt<bool> EnableWideActiveLaneMask(
+static cl::opt<bool> EnableWideActiveLaneMask(
"enable-wide-lane-mask", cl::init(false), cl::Hidden,
cl::desc("Enable use of wide get active lane mask instructions"));
@@ -1110,8 +1110,7 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
// x && !x -> 0
if (match(&R, m_LogicalAnd(m_VPValue(X), m_Not(m_Deferred(X)))))
- return Def->replaceAllUsesWith(Plan->getOrAddLiveIn(
- ConstantInt::getFalse(VPTypeAnalysis(*Plan).inferScalarType(Def))));
+ return Def->replaceAllUsesWith(Plan->getFalse());
if (match(Def, m_Select(m_VPValue(), m_VPValue(X), m_Deferred(X))))
return Def->replaceAllUsesWith(X);
@@ -3346,12 +3345,7 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
VectorStep = Builder.createWidenCast(CastOp, VectorStep, IVTy);
}
- [[maybe_unused]] auto *ConstStep =
- ScalarStep->isLiveIn()
- ? dyn_cast<ConstantInt>(ScalarStep->getLiveInIRValue())
- : nullptr;
- assert(!ConstStep || ConstStep->getValue() != 1);
- (void)ConstStep;
+ assert(!match(ScalarStep, m_One()) && "Expected non-unit scalar-step");
if (TypeInfo.inferScalarType(ScalarStep) != IVTy) {
ScalarStep =
Builder.createWidenCast(Instruction::Trunc, ScalarStep, IVTy);
diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt
index 8399292..6f98eae 100644
--- a/llvm/runtimes/CMakeLists.txt
+++ b/llvm/runtimes/CMakeLists.txt
@@ -507,10 +507,14 @@ if(build_runtimes)
endif()
 # Forward user-provided system configuration to runtimes for requirement introspection.
- # CMAKE_PREFIX_PATH is the search path for CMake packages.
+ # CMAKE_PREFIX_PATH is the search path for CMake packages. In order to pass it
+ # through the command-line interface, the CMake semicolon list separator needs
+ # to be replaced with $<SEMICOLON>.
if(CMAKE_PREFIX_PATH)
- list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH}")
+ string(JOIN "$<SEMICOLON>" escaped_cmake_prefix_path ${CMAKE_PREFIX_PATH})
+ list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${escaped_cmake_prefix_path}")
endif()
+
# CMAKE_PROGRAM_PATH is the search path for executables such as python.
if(CMAKE_PROGRAM_PATH)
list(APPEND extra_cmake_args "-DCMAKE_PROGRAM_PATH=${CMAKE_PROGRAM_PATH}")
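
Concretely, with CMAKE_PREFIX_PATH set to /opt/foo;/opt/bar (hypothetical paths), the old code produced -DCMAKE_PREFIX_PATH=/opt/foo;/opt/bar, which CMake re-splits at the embedded semicolon so the sub-build sees only /opt/foo plus a stray /opt/bar argument; joining with the $<SEMICOLON> generator expression defers the separator so the whole list survives as a single cache entry.
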
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json
index 07fde84..ae36ff5 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json
+++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json
@@ -87,6 +87,32 @@
"Function": [1, 2],
"Pointer": [3, 4],
"Constant": [5, 6],
- "Variable": [7, 8]
+ "Variable": [7, 8],
+ "FCMP_false": [9, 10],
+ "FCMP_oeq": [11, 12],
+ "FCMP_ogt": [13, 14],
+ "FCMP_oge": [15, 16],
+ "FCMP_olt": [17, 18],
+ "FCMP_ole": [19, 20],
+ "FCMP_one": [21, 22],
+ "FCMP_ord": [23, 24],
+ "FCMP_uno": [25, 26],
+ "FCMP_ueq": [27, 28],
+ "FCMP_ugt": [29, 30],
+ "FCMP_uge": [31, 32],
+ "FCMP_ult": [33, 34],
+ "FCMP_ule": [35, 36],
+ "FCMP_une": [37, 38],
+ "FCMP_true": [39, 40],
+ "ICMP_eq": [41, 42],
+ "ICMP_ne": [43, 44],
+ "ICMP_ugt": [45, 46],
+ "ICMP_uge": [47, 48],
+ "ICMP_ult": [49, 50],
+ "ICMP_ule": [51, 52],
+ "ICMP_sgt": [53, 54],
+ "ICMP_sge": [55, 56],
+ "ICMP_slt": [57, 58],
+ "ICMP_sle": [59, 60]
}
}
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json
index 932b3a2..9003dc7 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json
+++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json
@@ -86,6 +86,32 @@
"Function": [1, 2, 3],
"Pointer": [4, 5, 6],
"Constant": [7, 8, 9],
- "Variable": [10, 11, 12]
+ "Variable": [10, 11, 12],
+ "FCMP_false": [13, 14, 15],
+ "FCMP_oeq": [16, 17, 18],
+ "FCMP_ogt": [19, 20, 21],
+ "FCMP_oge": [22, 23, 24],
+ "FCMP_olt": [25, 26, 27],
+ "FCMP_ole": [28, 29, 30],
+ "FCMP_one": [31, 32, 33],
+ "FCMP_ord": [34, 35, 36],
+ "FCMP_uno": [37, 38, 39],
+ "FCMP_ueq": [40, 41, 42],
+ "FCMP_ugt": [43, 44, 45],
+ "FCMP_uge": [46, 47, 48],
+ "FCMP_ult": [49, 50, 51],
+ "FCMP_ule": [52, 53, 54],
+ "FCMP_une": [55, 56, 57],
+ "FCMP_true": [58, 59, 60],
+ "ICMP_eq": [61, 62, 63],
+ "ICMP_ne": [64, 65, 66],
+ "ICMP_ugt": [67, 68, 69],
+ "ICMP_uge": [70, 71, 72],
+ "ICMP_ult": [73, 74, 75],
+ "ICMP_ule": [76, 77, 78],
+ "ICMP_sgt": [79, 80, 81],
+ "ICMP_sge": [82, 83, 84],
+ "ICMP_slt": [85, 86, 87],
+ "ICMP_sle": [88, 89, 90]
}
}
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json
index 19f3efe..7ef8549 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json
+++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json
@@ -47,6 +47,7 @@
"FPTrunc": [133, 134, 135],
"FPExt": [136, 137, 138],
"PtrToInt": [139, 140, 141],
+ "PtrToAddr": [202, 203, 204],
"IntToPtr": [142, 143, 144],
"BitCast": [145, 146, 147],
"AddrSpaceCast": [148, 149, 150],
@@ -86,6 +87,32 @@
"Function": [0, 0, 0],
"Pointer": [0, 0, 0],
"Constant": [0, 0, 0],
- "Variable": [0, 0, 0]
+ "Variable": [0, 0, 0],
+ "FCMP_false": [0, 0, 0],
+ "FCMP_oeq": [0, 0, 0],
+ "FCMP_ogt": [0, 0, 0],
+ "FCMP_oge": [0, 0, 0],
+ "FCMP_olt": [0, 0, 0],
+ "FCMP_ole": [0, 0, 0],
+ "FCMP_one": [0, 0, 0],
+ "FCMP_ord": [0, 0, 0],
+ "FCMP_uno": [0, 0, 0],
+ "FCMP_ueq": [0, 0, 0],
+ "FCMP_ugt": [0, 0, 0],
+ "FCMP_uge": [0, 0, 0],
+ "FCMP_ult": [0, 0, 0],
+ "FCMP_ule": [0, 0, 0],
+ "FCMP_une": [0, 0, 0],
+ "FCMP_true": [0, 0, 0],
+ "ICMP_eq": [0, 0, 0],
+ "ICMP_ne": [0, 0, 0],
+ "ICMP_ugt": [0, 0, 0],
+ "ICMP_uge": [0, 0, 0],
+ "ICMP_ult": [0, 0, 0],
+ "ICMP_ule": [0, 0, 0],
+ "ICMP_sgt": [1, 1, 1],
+ "ICMP_sge": [0, 0, 0],
+ "ICMP_slt": [0, 0, 0],
+ "ICMP_sle": [0, 0, 0]
}
}
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt
index df7769c..d62b0dd 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt
+++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt
@@ -82,3 +82,29 @@ Key: Function: [ 0.20 0.40 ]
Key: Pointer: [ 0.60 0.80 ]
Key: Constant: [ 1.00 1.20 ]
Key: Variable: [ 1.40 1.60 ]
+Key: FCMP_false: [ 1.80 2.00 ]
+Key: FCMP_oeq: [ 2.20 2.40 ]
+Key: FCMP_ogt: [ 2.60 2.80 ]
+Key: FCMP_oge: [ 3.00 3.20 ]
+Key: FCMP_olt: [ 3.40 3.60 ]
+Key: FCMP_ole: [ 3.80 4.00 ]
+Key: FCMP_one: [ 4.20 4.40 ]
+Key: FCMP_ord: [ 4.60 4.80 ]
+Key: FCMP_uno: [ 5.00 5.20 ]
+Key: FCMP_ueq: [ 5.40 5.60 ]
+Key: FCMP_ugt: [ 5.80 6.00 ]
+Key: FCMP_uge: [ 6.20 6.40 ]
+Key: FCMP_ult: [ 6.60 6.80 ]
+Key: FCMP_ule: [ 7.00 7.20 ]
+Key: FCMP_une: [ 7.40 7.60 ]
+Key: FCMP_true: [ 7.80 8.00 ]
+Key: ICMP_eq: [ 8.20 8.40 ]
+Key: ICMP_ne: [ 8.60 8.80 ]
+Key: ICMP_ugt: [ 9.00 9.20 ]
+Key: ICMP_uge: [ 9.40 9.60 ]
+Key: ICMP_ult: [ 9.80 10.00 ]
+Key: ICMP_ule: [ 10.20 10.40 ]
+Key: ICMP_sgt: [ 10.60 10.80 ]
+Key: ICMP_sge: [ 11.00 11.20 ]
+Key: ICMP_slt: [ 11.40 11.60 ]
+Key: ICMP_sle: [ 11.80 12.00 ]
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt
index f3ce809..e443adb 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt
+++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt
@@ -82,3 +82,29 @@ Key: Function: [ 0.50 1.00 ]
Key: Pointer: [ 1.50 2.00 ]
Key: Constant: [ 2.50 3.00 ]
Key: Variable: [ 3.50 4.00 ]
+Key: FCMP_false: [ 4.50 5.00 ]
+Key: FCMP_oeq: [ 5.50 6.00 ]
+Key: FCMP_ogt: [ 6.50 7.00 ]
+Key: FCMP_oge: [ 7.50 8.00 ]
+Key: FCMP_olt: [ 8.50 9.00 ]
+Key: FCMP_ole: [ 9.50 10.00 ]
+Key: FCMP_one: [ 10.50 11.00 ]
+Key: FCMP_ord: [ 11.50 12.00 ]
+Key: FCMP_uno: [ 12.50 13.00 ]
+Key: FCMP_ueq: [ 13.50 14.00 ]
+Key: FCMP_ugt: [ 14.50 15.00 ]
+Key: FCMP_uge: [ 15.50 16.00 ]
+Key: FCMP_ult: [ 16.50 17.00 ]
+Key: FCMP_ule: [ 17.50 18.00 ]
+Key: FCMP_une: [ 18.50 19.00 ]
+Key: FCMP_true: [ 19.50 20.00 ]
+Key: ICMP_eq: [ 20.50 21.00 ]
+Key: ICMP_ne: [ 21.50 22.00 ]
+Key: ICMP_ugt: [ 22.50 23.00 ]
+Key: ICMP_uge: [ 23.50 24.00 ]
+Key: ICMP_ult: [ 24.50 25.00 ]
+Key: ICMP_ule: [ 25.50 26.00 ]
+Key: ICMP_sgt: [ 26.50 27.00 ]
+Key: ICMP_sge: [ 27.50 28.00 ]
+Key: ICMP_slt: [ 28.50 29.00 ]
+Key: ICMP_sle: [ 29.50 30.00 ]
diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt
index 72b25b9..7fb6043 100644
--- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt
+++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt
@@ -82,3 +82,29 @@ Key: Function: [ 0.00 0.00 ]
Key: Pointer: [ 0.00 0.00 ]
Key: Constant: [ 0.00 0.00 ]
Key: Variable: [ 0.00 0.00 ]
+Key: FCMP_false: [ 0.00 0.00 ]
+Key: FCMP_oeq: [ 0.00 0.00 ]
+Key: FCMP_ogt: [ 0.00 0.00 ]
+Key: FCMP_oge: [ 0.00 0.00 ]
+Key: FCMP_olt: [ 0.00 0.00 ]
+Key: FCMP_ole: [ 0.00 0.00 ]
+Key: FCMP_one: [ 0.00 0.00 ]
+Key: FCMP_ord: [ 0.00 0.00 ]
+Key: FCMP_uno: [ 0.00 0.00 ]
+Key: FCMP_ueq: [ 0.00 0.00 ]
+Key: FCMP_ugt: [ 0.00 0.00 ]
+Key: FCMP_uge: [ 0.00 0.00 ]
+Key: FCMP_ult: [ 0.00 0.00 ]
+Key: FCMP_ule: [ 0.00 0.00 ]
+Key: FCMP_une: [ 0.00 0.00 ]
+Key: FCMP_true: [ 0.00 0.00 ]
+Key: ICMP_eq: [ 0.00 0.00 ]
+Key: ICMP_ne: [ 0.00 0.00 ]
+Key: ICMP_ugt: [ 0.00 0.00 ]
+Key: ICMP_uge: [ 0.00 0.00 ]
+Key: ICMP_ult: [ 0.00 0.00 ]
+Key: ICMP_ule: [ 0.00 0.00 ]
+Key: ICMP_sgt: [ 0.00 0.00 ]
+Key: ICMP_sge: [ 0.00 0.00 ]
+Key: ICMP_slt: [ 0.00 0.00 ]
+Key: ICMP_sle: [ 0.00 0.00 ]
diff --git a/llvm/test/Analysis/IR2Vec/if-else.ll b/llvm/test/Analysis/IR2Vec/if-else.ll
index fe53247..804c1ca 100644
--- a/llvm/test/Analysis/IR2Vec/if-else.ll
+++ b/llvm/test/Analysis/IR2Vec/if-else.ll
@@ -29,7 +29,7 @@ return: ; preds = %if.else, %if.then
; CHECK: Basic block vectors:
; CHECK-NEXT: Basic block: entry:
-; CHECK-NEXT: [ 816.00 825.00 834.00 ]
+; CHECK-NEXT: [ 816.20 825.20 834.20 ]
; CHECK-NEXT: Basic block: if.then:
; CHECK-NEXT: [ 195.00 198.00 201.00 ]
; CHECK-NEXT: Basic block: if.else:
diff --git a/llvm/test/Analysis/IR2Vec/unreachable.ll b/llvm/test/Analysis/IR2Vec/unreachable.ll
index b0e3e49..9be0ee1 100644
--- a/llvm/test/Analysis/IR2Vec/unreachable.ll
+++ b/llvm/test/Analysis/IR2Vec/unreachable.ll
@@ -33,7 +33,7 @@ return: ; preds = %if.else, %if.then
; CHECK: Basic block vectors:
; CHECK-NEXT: Basic block: entry:
-; CHECK-NEXT: [ 816.00 825.00 834.00 ]
+; CHECK-NEXT: [ 816.20 825.20 834.20 ]
; CHECK-NEXT: Basic block: if.then:
; CHECK-NEXT: [ 195.00 198.00 201.00 ]
; CHECK-NEXT: Basic block: if.else:
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
index 023a8c0..27a85c7 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
@@ -560,3 +560,44 @@ loop:
exit:
ret void
}
+
+; TODO: Relax HasSameSize check in isSafeDependenceDistance.
+define void @different_type_sizes_safe_dep_dist(i16 %n, ptr %p) {
+; CHECK-LABEL: 'different_type_sizes_safe_dep_dist'
+; CHECK-NEXT: loop:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Unknown:
+; CHECK-NEXT: store i32 0, ptr %gep.iv, align 1 ->
+; CHECK-NEXT: store i16 1, ptr %gep.off.iv, align 1
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %n.pos = icmp sgt i16 %n, 0
+ br i1 %n.pos, label %ph, label %exit
+
+ph:
+ %gep.off = getelementptr i32, ptr %p, i16 %n
+ br label %loop
+
+loop:
+ %iv = phi i16 [ 0, %ph ], [ %iv.next, %loop ]
+ %gep.iv = getelementptr inbounds i32, ptr %p, i16 %iv
+ store i32 0, ptr %gep.iv, align 1
+ %gep.off.iv = getelementptr i32, ptr %gep.off, i16 %iv
+ store i16 1, ptr %gep.off.iv, align 1
+ %iv.next = add i16 %iv, 1
+ %exit.cond = icmp eq i16 %iv.next, %n
+ br i1 %exit.cond, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll
index a08f859..6d9aa8d 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll
@@ -756,3 +756,129 @@ e.1:
e.2:
ret void
}
+
+define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_nofree_via_context(ptr %A, ptr %B) nosync {
+; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_nofree_via_context'
+; CHECK-NEXT: loop.header:
+; CHECK-NEXT: Memory dependences are safe with run-time checks
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group GRP0:
+; CHECK-NEXT: %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+; CHECK-NEXT: Against group GRP1:
+; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+; CHECK-NEXT: Grouped accesses:
+; CHECK-NEXT: Group GRP0:
+; CHECK-NEXT: (Low: %B High: inttoptr (i64 -1 to ptr))
+; CHECK-NEXT: Member: {%B,+,4}<nuw><%loop.header>
+; CHECK-NEXT: Group GRP1:
+; CHECK-NEXT: (Low: %A High: inttoptr (i64 -1 to ptr))
+; CHECK-NEXT: Member: {%A,+,4}<nuw><%loop.header>
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %A, i64 2000) ]
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %B, i64 2000) ]
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+ %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+ %l = load i32, ptr %gep.A, align 4
+ store i32 0, ptr %gep.B, align 4
+ %cntable.c.1 = icmp ult i64 %iv, 1000
+ %iv.next = add nuw nsw i64 %iv, 1
+ br i1 %cntable.c.1, label %b2, label %e.1
+
+b2:
+ %uncntable.c.0 = icmp eq i32 %l, 0
+ br i1 %uncntable.c.0, label %e.2, label %b3
+
+b3:
+ %cntable.c.2 = icmp eq i64 %iv.next, 500
+ br i1 %cntable.c.2, label %cleanup4, label %latch
+
+latch:
+ br label %loop.header
+
+cleanup4:
+ ret void
+
+e.1:
+ ret void
+
+e.2:
+ ret void
+}
+
+define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_missing_nofree_multiple_predecessors(ptr %A, ptr %B, i1 %c) nosync {
+; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_missing_nofree_multiple_predecessors'
+; CHECK-NEXT: loop.header:
+; CHECK-NEXT: Memory dependences are safe with run-time checks
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group GRP0:
+; CHECK-NEXT: %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+; CHECK-NEXT: Against group GRP1:
+; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+; CHECK-NEXT: Grouped accesses:
+; CHECK-NEXT: Group GRP0:
+; CHECK-NEXT: (Low: %B High: inttoptr (i64 -1 to ptr))
+; CHECK-NEXT: Member: {%B,+,4}<nuw><%loop.header>
+; CHECK-NEXT: Group GRP1:
+; CHECK-NEXT: (Low: %A High: inttoptr (i64 -1 to ptr))
+; CHECK-NEXT: Member: {%A,+,4}<nuw><%loop.header>
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %A, i64 2000) ]
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %B, i64 2000) ]
+ br i1 %c, label %then, label %else
+
+then:
+ br label %loop.header
+
+else:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %then ], [ 0, %else ], [ %iv.next, %latch ]
+ %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+ %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+ %l = load i32, ptr %gep.A, align 4
+ store i32 0, ptr %gep.B, align 4
+ %cntable.c.1 = icmp ult i64 %iv, 1000
+ %iv.next = add nuw nsw i64 %iv, 1
+ br i1 %cntable.c.1, label %b2, label %e.1
+
+b2:
+ %uncntable.c.0 = icmp eq i32 %l, 0
+ br i1 %uncntable.c.0, label %e.2, label %b3
+
+b3:
+ %cntable.c.2 = icmp eq i64 %iv.next, 500
+ br i1 %cntable.c.2, label %cleanup4, label %latch
+
+latch:
+ br label %loop.header
+
+cleanup4:
+ ret void
+
+e.1:
+ ret void
+
+e.2:
+ ret void
+}
diff --git a/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll b/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll
index 1e21fbf..e1c6230 100644
--- a/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll
+++ b/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll
@@ -188,3 +188,43 @@ loop:
exit:
ret void
}
+
+define noundef i64 @udiv_mul_common_vscale_factor(i64 %a, i64 %b) {
+; CHECK-LABEL: 'udiv_mul_common_vscale_factor'
+; CHECK-NEXT: Classifying expressions for: @udiv_mul_common_vscale_factor
+; CHECK-NEXT: %vs = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: --> vscale U: [1,0) S: [1,0)
+; CHECK-NEXT: %a.vs = mul i64 %a, %vs
+; CHECK-NEXT: --> (vscale * %a) U: full-set S: full-set
+; CHECK-NEXT: %b.vs = mul i64 %b, %vs
+; CHECK-NEXT: --> (vscale * %b) U: full-set S: full-set
+; CHECK-NEXT: %div = udiv i64 %a.vs, %b.vs
+; CHECK-NEXT: --> ((vscale * %a) /u (vscale * %b)) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @udiv_mul_common_vscale_factor
+;
+ %vs = call i64 @llvm.vscale()
+ %a.vs = mul i64 %a, %vs
+ %b.vs = mul i64 %b, %vs
+ %div = udiv i64 %a.vs, %b.vs
+ ret i64 %div
+}
+
+define noundef i64 @udiv_mul_nuw_common_vscale_factor(i64 %a, i64 %b) {
+; CHECK-LABEL: 'udiv_mul_nuw_common_vscale_factor'
+; CHECK-NEXT: Classifying expressions for: @udiv_mul_nuw_common_vscale_factor
+; CHECK-NEXT: %vs = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: --> vscale U: [1,0) S: [1,0)
+; CHECK-NEXT: %a.vs = mul nuw i64 %a, %vs
+; CHECK-NEXT: --> (vscale * %a)<nuw> U: full-set S: full-set
+; CHECK-NEXT: %b.vs = mul nuw i64 %b, %vs
+; CHECK-NEXT: --> (vscale * %b)<nuw> U: full-set S: full-set
+; CHECK-NEXT: %div = udiv i64 %a.vs, %b.vs
+; CHECK-NEXT: --> (%a /u %b) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @udiv_mul_nuw_common_vscale_factor
+;
+ %vs = call i64 @llvm.vscale()
+ %a.vs = mul nuw i64 %a, %vs
+ %b.vs = mul nuw i64 %b, %vs
+ %div = udiv i64 %a.vs, %b.vs
+ ret i64 %div
+}
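
The contrast between the two functions is the point of the test: udiv only distributes over a common factor when the multiplications cannot wrap. Over unbounded integers, (k * a) /u (k * b) = a /u b for any k >= 1, but in modular arithmetic the products can wrap and break the identity. For intuition in i4 (values 0..15) with a = 9, b = 3, k = 2: k * a wraps to 2, k * b = 6, and 2 /u 6 = 0, while a /u b = 3. The nuw flags rule out exactly this wrapping, which is why only the second function folds to (%a /u %b).
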
diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt
index 4db7663..e810fcb6 100644
--- a/llvm/test/CMakeLists.txt
+++ b/llvm/test/CMakeLists.txt
@@ -71,7 +71,6 @@ set(LLVM_TEST_DEPENDS
${LLVM_TEST_DEPENDS_COMMON}
BugpointPasses
LLVMWindowsDriver
- UnitTests
bugpoint
llc
lli
@@ -248,7 +247,7 @@ if (LLVM_INCLUDE_SPIRV_TOOLS_TESTS)
list(APPEND LLVM_TEST_DEPENDS spirv-link)
endif()
-add_custom_target(llvm-test-depends DEPENDS ${LLVM_TEST_DEPENDS})
+add_custom_target(llvm-test-depends DEPENDS ${LLVM_TEST_DEPENDS} UnitTests)
set_target_properties(llvm-test-depends PROPERTIES FOLDER "LLVM/Tests")
if(LLVM_BUILD_TOOLS)
@@ -260,7 +259,7 @@ endif()
add_lit_testsuite(check-llvm "Running the LLVM regression tests"
${CMAKE_CURRENT_BINARY_DIR}
${exclude_from_check_all}
- DEPENDS ${LLVM_TEST_DEPENDS}
+ DEPENDS ${LLVM_TEST_DEPENDS} UnitTests
)
set_target_properties(check-llvm PROPERTIES FOLDER "LLVM/Tests")
@@ -270,10 +269,11 @@ add_lit_testsuites(LLVM ${CMAKE_CURRENT_SOURCE_DIR}
${exclude_from_check_all}
DEPENDS ${LLVM_TEST_DEPENDS}
FOLDER "Tests/Subdirectories"
- SKIP "^FileCheck" "^TableGen"
+ SKIP "^FileCheck" "^TableGen" "^Unit"
)
add_subdirectory(FileCheck)
add_subdirectory(TableGen)
+add_subdirectory(Unit)
# Setup an alias for 'check-all'.
add_custom_target(check)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-modf.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-modf.mir
new file mode 100644
index 0000000..36ac7eb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-modf.mir
@@ -0,0 +1,206 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=aarch64 -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_modf_f16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_f16
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+ ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[COPY]](s16)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[LOAD]](s32)
+ ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: $h0 = COPY [[FPTRUNC1]](s16)
+ ; CHECK-NEXT: $h1 = COPY [[FPTRUNC]](s16)
+ ; CHECK-NEXT: RET_ReallyLR implicit $h0, implicit $h1
+ %0:_(s16) = COPY $h0
+ %1:_(s16), %2:_(s16) = G_FMODF %0
+ $h0 = COPY %1(s16)
+ $h1 = COPY %2(s16)
+ RET_ReallyLR implicit $h0, implicit $h1
+...
+---
+name: test_modf_f16_only_use_fractional_part
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_f16_only_use_fractional_part
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+ ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[COPY]](s16)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: $h0 = COPY [[FPTRUNC]](s16)
+ ; CHECK-NEXT: RET_ReallyLR implicit $h0
+ %0:_(s16) = COPY $h0
+ %1:_(s16), %2:_(s16) = G_FMODF %0
+ $h0 = COPY %1(s16)
+ RET_ReallyLR implicit $h0
+...
+---
+name: test_modf_v2f16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_v2f16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.1)
+ ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[LOAD]](s32)
+ ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[FPEXT1]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[LOAD1]](s32)
+ ; CHECK-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC1]](s16), [[FPTRUNC3]](s16), [[DEF]](s16), [[DEF]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC2]](s16), [[DEF]](s16), [[DEF]](s16)
+ ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; CHECK-NEXT: $d1 = COPY [[BUILD_VECTOR1]](<4 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
+ %1:_(<4 x s16>) = COPY $d0
+ %0:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %1(<4 x s16>)
+ %3:_(<2 x s16>), %4:_(<2 x s16>) = G_FMODF %0
+ %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %3(<2 x s16>)
+ %7:_(s16) = G_IMPLICIT_DEF
+ %8:_(<4 x s16>) = G_BUILD_VECTOR %5(s16), %6(s16), %7(s16), %7(s16)
+ %9:_(s16), %10:_(s16) = G_UNMERGE_VALUES %4(<2 x s16>)
+ %11:_(<4 x s16>) = G_BUILD_VECTOR %9(s16), %10(s16), %7(s16), %7(s16)
+ $d0 = COPY %8(<4 x s16>)
+ $d1 = COPY %11(<4 x s16>)
+ RET_ReallyLR implicit $d0, implicit $d1
+...
+---
+name: test_modf_v3f32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_v3f32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]](<4 x s32>)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.2
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[UV]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.2)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s32) from %stack.1)
+ ; CHECK-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[UV2]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX2]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY [[BUILD_VECTOR1]](<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %1:_(<2 x s64>) = COPY $q0
+ %2:_(<4 x s32>) = G_BITCAST %1(<2 x s64>)
+ %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %2(<4 x s32>)
+ %0:_(<3 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32), %5(s32)
+ %7:_(<3 x s32>), %8:_(<3 x s32>) = G_FMODF %0
+ %9:_(s32), %10:_(s32), %11:_(s32) = G_UNMERGE_VALUES %7(<3 x s32>)
+ %12:_(s32) = G_IMPLICIT_DEF
+ %13:_(<4 x s32>) = G_BUILD_VECTOR %9(s32), %10(s32), %11(s32), %12(s32)
+ %14:_(s32), %15:_(s32), %16:_(s32) = G_UNMERGE_VALUES %8(<3 x s32>)
+ %17:_(<4 x s32>) = G_BUILD_VECTOR %14(s32), %15(s32), %16(s32), %12(s32)
+ $q0 = COPY %13(<4 x s32>)
+ $q1 = COPY %17(<4 x s32>)
+ RET_ReallyLR implicit $q0, implicit $q1
+...
+---
+name: test_modf_v2f64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_v2f64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $d0 = COPY [[UV]](s64)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $d0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.1)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $d0 = COPY [[UV1]](s64)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0)
+ ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s64) from %stack.0)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY1]](s64), [[COPY2]](s64)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
+ ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ; CHECK-NEXT: $q1 = COPY [[BUILD_VECTOR1]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %0:_(<2 x s64>) = COPY $q0
+ %1:_(<2 x s64>), %2:_(<2 x s64>) = G_FMODF %0
+ $q0 = COPY %1(<2 x s64>)
+ $q1 = COPY %2(<2 x s64>)
+ RET_ReallyLR implicit $q0, implicit $q1
+...
+---
+name: test_modf_fp128
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_fp128
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $q0 = COPY [[COPY]](s128)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s128) = COPY $q0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s128) from %stack.0)
+ ; CHECK-NEXT: $q0 = COPY [[COPY1]](s128)
+ ; CHECK-NEXT: $q1 = COPY [[LOAD]](s128)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %0:_(s128) = COPY $q0
+ %1:_(s128), %2:_(s128) = G_FMODF %0
+ $q0 = COPY %1(s128)
+ $q1 = COPY %2(s128)
+ RET_ReallyLR implicit $q0, implicit $q1
+...
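
The legalized sequences above all follow the C library contract of modf: the fractional part comes back in the return register, and the integral part is stored through the pointer argument, which is why each call site allocates a stack slot, passes its address in $x0, and reloads the integral part afterwards. A reference snippet in standard C++ (not part of the test) showing the contract the libcalls implement:

    #include <cassert>
    #include <cmath>

    int main() {
      double Integral = 0.0;
      double Frac = std::modf(3.75, &Integral); // corresponds to the &modf libcall
      assert(Frac == 0.75 && Integral == 3.0);

      float IntegralF = 0.0f;
      float FracF = std::modf(3.5f, &IntegralF); // float overload; the &modff libcall
      assert(FracF == 0.5f && IntegralF == 3.0f);
      return 0;
    }
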
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index ba867f4..d721b73c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -508,6 +508,10 @@
# DEBUG-NEXT: G_FREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
+# DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. the first uncovered type index: 1, OK
+# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir
new file mode 100644
index 0000000..604cb96
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir
@@ -0,0 +1,136 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -run-pass=instruction-select %s -o - | FileCheck %s
+---
+name: test_modf_fp128
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+ - { reg: '$q0' }
+frameInfo:
+ maxAlignment: 16
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+body: |
+ bb.1:
+ liveins: $q0
+
+ ; CHECK-LABEL: name: test_modf_fp128
+ ; CHECK: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $q0 = COPY [[COPY]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ ; CHECK-NEXT: BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-NEXT: $q0 = COPY [[COPY1]]
+ ; CHECK-NEXT: $q1 = COPY [[LDRQui]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %0:fpr(s128) = COPY $q0
+ %3:gpr(p0) = G_FRAME_INDEX %stack.0
+ ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ $q0 = COPY %0(s128)
+ $x0 = COPY %3(p0)
+ BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0
+ ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ %1:fpr(s128) = COPY $q0
+ %2:fpr(s128) = G_LOAD %3(p0) :: (load (s128) from %stack.0)
+ $q0 = COPY %1(s128)
+ $q1 = COPY %2(s128)
+ RET_ReallyLR implicit $q0, implicit $q1
+...
+---
+name: test_modf_double
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+ - { reg: '$d0' }
+frameInfo:
+ maxAlignment: 8
+stack:
+ - { id: 0, size: 8, alignment: 8 }
+machineFunctionInfo: {}
+body: |
+ bb.1:
+ liveins: $d0
+
+ ; CHECK-LABEL: name: test_modf_double
+ ; CHECK: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $d0 = COPY [[COPY]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui %stack.0, 0 :: (load (s64) from %stack.0)
+ ; CHECK-NEXT: $d0 = COPY [[COPY1]]
+ ; CHECK-NEXT: $d1 = COPY [[LDRDui]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
+ %0:fpr(s64) = COPY $d0
+ %3:gpr(p0) = G_FRAME_INDEX %stack.0
+ ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ $d0 = COPY %0(s64)
+ $x0 = COPY %3(p0)
+ BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ %1:fpr(s64) = COPY $d0
+ %2:fpr(s64) = G_LOAD %3(p0) :: (load (s64) from %stack.0)
+ $d0 = COPY %1(s64)
+ $d1 = COPY %2(s64)
+ RET_ReallyLR implicit $d0, implicit $d1
+...
+---
+name: test_modf_double_vec
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+ - { reg: '$d0' }
+frameInfo:
+ maxAlignment: 8
+stack:
+ - { id: 0, size: 8, alignment: 8 }
+machineFunctionInfo: {}
+body: |
+ bb.1:
+ liveins: $d0
+
+ ; CHECK-LABEL: name: test_modf_double_vec
+ ; CHECK: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $d0 = COPY [[COPY]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui %stack.0, 0 :: (load (s64) from %stack.0)
+ ; CHECK-NEXT: $d0 = COPY [[COPY1]]
+ ; CHECK-NEXT: $d1 = COPY [[LDRDui]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
+ %0:fpr(s64) = COPY $d0
+ %3:gpr(p0) = G_FRAME_INDEX %stack.0
+ ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ $d0 = COPY %0(s64)
+ $x0 = COPY %3(p0)
+ BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ %1:fpr(s64) = COPY $d0
+ %2:fpr(s64) = G_LOAD %3(p0) :: (load (s64) from %stack.0)
+ $d0 = COPY %1(s64)
+ $d1 = COPY %2(s64)
+ RET_ReallyLR implicit $d0, implicit $d1
+...
diff --git a/llvm/test/CodeGen/AArch64/cbz_wzr.mir b/llvm/test/CodeGen/AArch64/cbz_wzr.mir
new file mode 100644
index 0000000..7deea56
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/cbz_wzr.mir
@@ -0,0 +1,260 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -o - %s -mtriple=aarch64-none-eabi -run-pass=machine-cp -mcp-use-is-copy-instr | FileCheck %s
+
+---
+name: cbz_wzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: cbz_wzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CBZW $wzr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $w8 = ORRWrs $wzr, $wzr, 0
+ CBZW killed renamable $w8, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: cbnz_wzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: cbnz_wzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CBNZW $wzr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $w8 = ORRWrs $wzr, $wzr, 0
+ CBNZW killed renamable $w8, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: tbz_wzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: tbz_wzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: TBZW $wzr, 0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $w8 = ORRWrs $wzr, $wzr, 0
+ TBZW killed renamable $w8, 0, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: tbnz_wzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: tbnz_wzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: TBNZW $wzr, 0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $w8 = ORRWrs $wzr, $wzr, 0
+ TBNZW killed renamable $w8, 0, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+
+---
+name: cbz_xzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: cbz_xzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CBZX $xzr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $x8 = ORRXrs $xzr, $xzr, 0
+ CBZX killed renamable $x8, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: cbnz_xzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: cbnz_xzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CBNZX $xzr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $x8 = ORRXrs $xzr, $xzr, 0
+ CBNZX killed renamable $x8, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: tbz_xzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: tbz_xzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: TBZX $xzr, 0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $x8 = ORRXrs $xzr, $xzr, 0
+ TBZX killed renamable $x8, 0, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: tbnz_xzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: tbnz_xzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: TBNZX $xzr, 0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $x8 = ORRXrs $xzr, $xzr, 0
+ TBNZX killed renamable $x8, 0, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
index aca2816..7fd0cee 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
@@ -164,10 +164,10 @@ stack:
- { id: 1, name: z1.addr, size: 16, alignment: 16, stack-id: scalable-vector,
debug-info-variable: '!31', debug-info-expression: '!DIExpression()',
debug-info-location: '!32' }
- - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-vector,
+ - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
debug-info-variable: '!33', debug-info-expression: '!DIExpression()',
debug-info-location: '!34' }
- - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-vector,
+ - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
debug-info-variable: '!35', debug-info-expression: '!DIExpression()',
debug-info-location: '!36' }
- { id: 4, name: w0.addr, size: 4, alignment: 4, local-offset: -4, debug-info-variable: '!37',
@@ -181,10 +181,10 @@ stack:
- { id: 7, name: localv1, size: 16, alignment: 16, stack-id: scalable-vector,
debug-info-variable: '!45', debug-info-expression: '!DIExpression()',
debug-info-location: '!46' }
- - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-vector,
+ - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
debug-info-variable: '!48', debug-info-expression: '!DIExpression()',
debug-info-location: '!49' }
- - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-vector,
+ - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
debug-info-variable: '!51', debug-info-expression: '!DIExpression()',
debug-info-location: '!52' }
machineFunctionInfo: {}
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
index 0ea180b..41ba554 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
@@ -96,8 +96,8 @@ stack:
- { id: 1, size: 8, alignment: 8 }
- { id: 2, size: 16, alignment: 16, stack-id: scalable-vector }
- { id: 3, size: 16, alignment: 16, stack-id: scalable-vector }
- - { id: 4, size: 2, alignment: 2, stack-id: scalable-vector }
- - { id: 5, size: 2, alignment: 2, stack-id: scalable-vector }
+ - { id: 4, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
+ - { id: 5, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
machineFunctionInfo: {}
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir
new file mode 100644
index 0000000..35eafe8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir
@@ -0,0 +1,587 @@
+# RUN: llc -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -mtriple=aarch64-none-linux-gnu -run-pass=prologepilog %s -o - | FileCheck %s
+# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -start-before=prologepilog %s -o - | FileCheck %s --check-prefix=ASM
+# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -start-before=prologepilog %s -filetype=obj -o %t
+# RUN: llvm-objdump --dwarf=frames %t | FileCheck %s --check-prefix=UNWINDINFO
+# RUN: rm -rf %t
+#
+# Test allocation and deallocation of SVE objects on the stack with
+# split-sve-objects (and hazard padding) enabled. This also tests using a
+# combination of scalable and non-scalable offsets to access the SVE objects
+# on the stack.
+#
+# With split-sve-objects (which implies hazard padding) the SVE area is split
+# into PPR and ZPR areas with (fixed-size) hazard padding between them. The PPR
+# area holds all scalable predicate callee saves and locals, and the ZPR area
+# holds all scalable vector callee saves and locals. Additionally, any FPR
+# callee save is promoted to a ZPR callee save (to avoid needing additional
+# hazard padding in the callee save area).
+#
+# +-------------+
+# | stack arg |
+# +-------------+ <- SP before call
+# | Callee Saves|
+# | Frame record| (if available)
+# |-------------| <- FP (if available)
+# | PPR area |
+# |-------------|
+# |/////////////| hazard padding
+# |-------------|
+# | ZPR area |
+# +-------------+
+# | : |
+# | Stack objs |
+# | : |
+# +-------------+ <- SP after call and frame-setup
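+#
+# As a worked example (illustrative only, not checked by these tests): with a
+# 128-bit vector length, VG = 2, so each "addvl sp, sp, #-1" moves SP by
+# 8 * VG = 16 bytes, and a CFA expression such as "sp + 1040 + 8 * VG"
+# resolves to sp + 1056; with a 256-bit vector length (VG = 4) the same
+# expression resolves to sp + 1072.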
+#
+--- |
+
+ define void @test_allocate_split_sve() uwtable { entry: unreachable }
+ define void @test_allocate_split_sve_realigned() uwtable { entry: unreachable }
+ define void @test_address_split_sve() uwtable { entry: unreachable }
+ define void @test_address_split_sve_fp() uwtable { entry: unreachable }
+ define aarch64_sve_vector_pcs void @save_restore_ppr_zpr() uwtable { entry: unreachable }
+
+...
+---
+# +----------+
+# |scratchreg| // x29 is used as scratch reg.
+# |----------|
+# | %stack.1 | // scalable predicate of n * 12 bytes, aligned to 16 bytes
+# | | // to be materialized with 1*ADDVL (<=> n * 16 bytes)
+# |----------|
+# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area
+# |//////////| // Note: This is currently not included in the "stackSize"
+# +----------+
+# | %stack.0 | // scalable SVE object of n * 18 bytes, aligned to 16 bytes,
+# | | // to be materialized with 2*ADDVL (<=> 2 * n * 16 bytes)
+# +----------+
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.2 | // not scalable
+# +----------+ <- SP
+
+# CHECK-LABEL: name: test_allocate_split_sve
+# CHECK: stackSize: 1056
+
+# CHECK: bb.0.entry:
+# CHECK: liveins: $z0, $p0, $fp
+# CHECK: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.4)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22
+#
+# CHECK-NEXT: $x8 = ADDXri $sp, 1040, 0
+# CHECK-NEXT: $x8 = ADDPL_XXI $x8, 7, implicit $vg
+# CHECK-NEXT: STR_ZXI $z0, killed $x8, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+# CHECK-NEXT: $x8 = ADDXri $sp, 2064, 0
+# CHECK-NEXT: STR_PXI $p0, killed $x8, 18 :: (store (<vscale x 1 x s16>) into %stack.1)
+#
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1056
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.4)
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: test_allocate_split_sve:
+# ASM: str x29, [sp, #-16]!
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: .cfi_offset w29, -16
+# ASM-NEXT: sub sp, sp, #1024
+# ASM-NEXT: .cfi_def_cfa_offset 1040
+# ASM-NEXT: addvl sp, sp, #-1
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG
+# ASM-NEXT: sub sp, sp, #1040
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #-2
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 2080 + 24 * VG
+#
+# ASM: addvl sp, sp, #2
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+# ASM-NEXT: add sp, sp, #1024
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1056 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #1
+# ASM-NEXT: .cfi_def_cfa wsp, 1056
+# ASM-NEXT: add sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: ldr x29, [sp], #16
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +1040
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
+#
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1056, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +1056
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+
+name: test_allocate_split_sve
+stack:
+ - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 }
+ - { id: 1, stack-id: scalable-vector, size: 12, alignment: 2 }
+ - { id: 2, stack-id: default, size: 16, alignment: 8 }
+body: |
+ bb.0.entry:
+ liveins: $z0, $p0
+ STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+ STR_PXI $p0, %stack.1, 0 :: (store (<vscale x 1 x s16>) into %stack.1)
+ RET_ReallyLR
+...
+---
+
+# Stack realignment is not supported with split-sve-objects, so we fall back
+# to the default hazard padding implementation. This does not prevent hazards
+# between ZPRs and PPRs (TODO: support this case).
+#
+# +----------+
+# | lr, fp | // frame record
+# |----------|
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.1 | // scalable predicate of n * 12 bytes, aligned to 16 bytes
+# | | // to be materialized with 1*ADDVL (<=> n * 16 bytes)
+# +----------+
+# | %stack.0 | // scalable SVE object of n * 18 bytes, aligned to 16 bytes,
+# | | // to be materialized with 2*ADDVL (<=> 2 * n * 16 bytes)
+# +----------+
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.2 | // not scalable
+# +----------+ <- SP
+
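+# Note: it is the 32-byte-aligned %stack.2 below that forces realignment here
+# (its alignment exceeds the default 16-byte stack alignment), which is why
+# the expected output uses an FP-based frame and realigns SP with ANDXri.
+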
+name: test_allocate_split_sve_realigned
+stack:
+ - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 }
+ - { id: 1, stack-id: scalable-vector, size: 12, alignment: 2 }
+ - { id: 2, stack-id: default, size: 16, alignment: 32 }
+body: |
+ bb.0.entry:
+ liveins: $z0, $p0
+ STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+ STR_PXI $p0, %stack.1, 0 :: (store (<vscale x 1 x s16>) into %stack.1)
+ RET_ReallyLR
+
+# CHECK-LABEL: name: test_allocate_split_sve_realigned
+# CHECK: stackSize: 2080
+
+# CHECK: bb.0.entry:
+# CHECK: liveins: $z0, $p0, $lr
+# CHECK: $sp = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040
+# CHECK-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.5)
+# CHECK-NEXT: frame-setup STRXui killed $lr, $sp, 129 :: (store (s64) into %stack.4)
+# CHECK-NEXT: $fp = frame-setup ADDXri $sp, 1024, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $w29, 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -8
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: $[[TMP]] = frame-setup ADDVL_XXI $[[TMP]], -2, implicit $vg
+# CHECK-NEXT: $sp = frame-setup ANDXri killed $x9, 7930
+#
+# CHECK-NEXT: $x8 = SUBXri $fp, 1024, 0
+# CHECK-NEXT: $x8 = ADDPL_XXI $x8, -1, implicit $vg
+# CHECK-NEXT: STR_ZXI $z0, killed $x8, -1 :: (store (<vscale x 1 x s128>) into %stack.0)
+# CHECK-NEXT: $x8 = SUBXri $fp, 1024, 0
+# CHECK-NEXT: STR_PXI $p0, killed $x8, -15 :: (store (<vscale x 1 x s16>) into %stack.1)
+#
+# CHECK-NEXT: $sp = frame-destroy SUBXri $fp, 1024, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1040
+# CHECK-NEXT: $lr = frame-destroy LDRXui $sp, 129 :: (load (s64) from %stack.4)
+# CHECK-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.5)
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w30
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: test_allocate_split_sve_realigned
+# ASM: sub sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa_offset 1040
+# ASM-NEXT: str x29, [sp, #1024]
+# ASM-NEXT: str x30, [sp, #1032]
+# ASM-NEXT: add x29, sp, #1024
+# ASM-NEXT: .cfi_def_cfa w29, 16
+# ASM-NEXT: .cfi_offset w30, -8
+# ASM-NEXT: .cfi_offset w29, -16
+#
+# ASM: sub sp, x29, #1024
+# ASM-NEXT: .cfi_def_cfa wsp, 1040
+# ASM-NEXT: ldr x30, [sp, #1032]
+# ASM-NEXT: ldr x29, [sp, #1024]
+# ASM-NEXT: add sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w30
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +1040
+# UNWINDINFO: DW_CFA_def_cfa: reg29 +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+#
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +1040
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg30
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+...
+---
+
+# +----------+
+# |scratchreg| // x29 is used as scratch reg.
+# +----------+
+# | %stack.2 | // scalable predicate @ SP + 2064b + 46 scalable bytes
+# |----------|
+# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area
+# |//////////| // Note: This is currently not included in the "stackSize"
+# |----------|
+# | %stack.0 | // scalable vector @ SP + 1040b + 16 scalable bytes
+# | %stack.1 | // scalable vector @ SP + 1040b
+# +----------+
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.3 | // not scalable
+# +----------+ <- SP
+
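+# Decoding the PPR access below (illustrative): STR_PXI scales its immediate
+# by the predicate register size (2 scalable bytes), so "STR_PXI $p0, $x8, 23"
+# with $x8 = SP + 2064 stores to SP + 2064b + 46 scalable bytes, matching the
+# %stack.2 position in the diagram above.
+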
+# CHECK-LABEL: name: test_address_split_sve
+# CHECK: stackSize: 1056
+
+# CHECK: bb.0.entry:
+# CHECK-NEXT: liveins:
+# CHECK-NEXT: {{ $}}
+# CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.5)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22
+#
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 1040, 0
+# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 1
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 1040, 0
+# CHECK-NEXT: STR_ZXI $z1, killed $[[TMP]], 0
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 2064, 0
+# CHECK-NEXT: STR_PXI $p0, killed $[[TMP]], 23
+#
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1056
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.5)
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: test_address_split_sve
+# ASM: str x29, [sp, #-16]!
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: .cfi_offset w29, -16
+# ASM-NEXT: sub sp, sp, #1024
+# ASM-NEXT: .cfi_def_cfa_offset 1040
+# ASM-NEXT: addvl sp, sp, #-1
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG
+# ASM-NEXT: sub sp, sp, #1040
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #-2
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 2080 + 24 * VG
+#
+# ASM: addvl sp, sp, #2
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+# ASM-NEXT: add sp, sp, #1024
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1056 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #1
+# ASM-NEXT: .cfi_def_cfa wsp, 1056
+# ASM-NEXT: add sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: ldr x29, [sp], #16
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +1040
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
+#
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1056, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +1056
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+
+name: test_address_split_sve
+frameInfo:
+ maxAlignment: 16
+stack:
+ - { id: 0, stack-id: scalable-vector, size: 16, alignment: 8 }
+ - { id: 1, stack-id: scalable-vector, size: 16, alignment: 8 }
+ - { id: 2, stack-id: scalable-vector, size: 2, alignment: 2 }
+ - { id: 3, stack-id: default, size: 16, alignment: 8 }
+body: |
+ bb.0.entry:
+ liveins: $z0, $z1, $p0
+
+ STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+ STR_ZXI $z1, %stack.1, 0 :: (store (<vscale x 1 x s128>) into %stack.1)
+ STR_PXI $p0, %stack.2, 0 :: (store (<vscale x 1 x s16>) into %stack.2)
+
+ RET_ReallyLR
+...
+---
+# +----------+
+# | lr, fp | // frame record
+# +----------+ <- FP
+# | %stack.2 | // scalable predicate @ FP - 2 scalable bytes
+# |----------|
+# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area
+# |//////////| // Note: This is currently not included in the "stackSize"
+# |----------|
+# | %stack.0 | // scalable vector @ FP - 1024b - 32 scalable bytes
+# | %stack.1 | // scalable vector @ FP - 1024b - 48 scalable bytes
+# +----------+
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.3 | // not scalable
+# +----------+ <- SP
+
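+# Decoding the FP-relative accesses below (illustrative): STR_PXI's immediate
+# scales by 2 scalable bytes and STR_ZXI's by 16 scalable bytes, so
+# "STR_PXI $p0, $fp, -1" stores to FP - 2 scalable bytes, and
+# "STR_ZXI $z0, $x8, -2" (with $x8 = FP - 1024) stores to
+# FP - 1024b - 32 scalable bytes, matching the diagram above.
+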
+# CHECK-LABEL: name: test_address_split_sve_fp
+# CHECK: stackSize: 1056
+#
+# CHECK: bb.0.entry:
+# CHECK-NEXT: liveins:
+# CHECK-NEXT: {{ $}}
+# CHECK-NEXT: early-clobber $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 :: (store (s64) into %stack.6), (store (s64) into %stack.5)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $w29, 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -8
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
+#
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = SUBXri $fp, 1024, 0
+# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], -2
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = SUBXri $fp, 1024, 0
+# CHECK-NEXT: STR_ZXI $z1, killed $[[TMP]], -3
+# CHECK-NEXT: STR_PXI $p0, $fp, -1
+#
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
+# CHECK-NEXT: early-clobber $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.6), (load (s64) from %stack.5)
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w30
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: test_address_split_sve_fp
+# ASM: stp x29, x30, [sp, #-16]!
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: mov x29, sp
+# ASM-NEXT: .cfi_def_cfa w29, 16
+# ASM-NEXT: .cfi_offset w30, -8
+# ASM-NEXT: .cfi_offset w29, -16
+# ASM-NEXT: sub sp, sp, #1024
+# ASM-NEXT: addvl sp, sp, #-1
+# ASM-NEXT: sub sp, sp, #1040
+# ASM-NEXT: addvl sp, sp, #-2
+#
+# ASM: addvl sp, sp, #2
+# ASM-NEXT: add sp, sp, #1024
+# ASM-NEXT: addvl sp, sp, #1
+# ASM-NEXT: add sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa wsp, 16
+# ASM-NEXT: ldp x29, x30, [sp], #16
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w30
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO: DW_CFA_def_cfa: reg29 +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+#
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg30
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+
+name: test_address_split_sve_fp
+frameInfo:
+ maxAlignment: 16
+ isFrameAddressTaken: true
+stack:
+ - { id: 0, stack-id: scalable-vector, size: 16, alignment: 8 }
+ - { id: 1, stack-id: scalable-vector, size: 16, alignment: 8 }
+ - { id: 2, stack-id: scalable-vector, size: 2, alignment: 2 }
+ - { id: 3, stack-id: default, size: 16, alignment: 8 }
+body: |
+ bb.0.entry:
+ liveins: $z0, $z1, $p0
+
+ STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+ STR_ZXI $z1, %stack.1, 0 :: (store (<vscale x 1 x s128>) into %stack.1)
+ STR_PXI $p0, %stack.2, 0 :: (store (<vscale x 1 x s16>) into %stack.2)
+
+ RET_ReallyLR
+...
+---
+# CHECK-LABEL: name: save_restore_ppr_zpr
+# CHECK: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.8)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: frame-setup STR_PXI killed $p6, $sp, 5 :: (store (s16) into %stack.7)
+# CHECK-NEXT: frame-setup STR_PXI killed $p5, $sp, 6 :: (store (s16) into %stack.6)
+# CHECK-NEXT: frame-setup STR_PXI killed $p4, $sp, 7 :: (store (s16) into %stack.5)
+#
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+#
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+# CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0 :: (store (s128) into %stack.4)
+# CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1 :: (store (s128) into %stack.3)
+# CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2 :: (store (s128) into %stack.2)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1056, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+#
+#
+# CHECK: $sp = frame-destroy ADDXri $sp, 1056, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.4)
+# CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.3)
+# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.2)
+#
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+#
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z9
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z10
+# CHECK-NEXT: $p6 = frame-destroy LDR_PXI $sp, 5 :: (load (s16) from %stack.7)
+# CHECK-NEXT: $p5 = frame-destroy LDR_PXI $sp, 6 :: (load (s16) from %stack.6)
+# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7 :: (load (s16) from %stack.5)
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
+# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.8)
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: save_restore_ppr_zpr:
+# ASM: str x29, [sp, #-16]!
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: .cfi_offset w29, -16
+# ASM-NEXT: addvl sp, sp, #-1
+# ASM-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+# ASM-NEXT: str p6, [sp, #5, mul vl]
+# ASM-NEXT: str p5, [sp, #6, mul vl]
+# ASM-NEXT: str p4, [sp, #7, mul vl]
+# ASM-NEXT: sub sp, sp, #1024
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #-3
+# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 1040 + 32 * VG
+# ASM-NEXT: str z10, [sp]
+# ASM-NEXT: str z9, [sp, #1, mul vl]
+# ASM-NEXT: str z8, [sp, #2, mul vl]
+# ASM-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040
+# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 24 * VG - 1040
+# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d10 @ cfa - 32 * VG - 1040
+# ASM-NEXT: sub sp, sp, #1056
+# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 2096 + 32 * VG
+#
+# ASM: add sp, sp, #1056
+# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 1040 + 32 * VG
+# ASM-NEXT: ldr z10, [sp]
+# ASM-NEXT: ldr z9, [sp, #1, mul vl]
+# ASM-NEXT: ldr z8, [sp, #2, mul vl]
+# ASM-NEXT: add sp, sp, #1024
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
+# ASM-NEXT: addvl sp, sp, #3
+# ASM-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+# ASM-NEXT: .cfi_restore z8
+# ASM-NEXT: .cfi_restore z9
+# ASM-NEXT: .cfi_restore z10
+# ASM-NEXT: ldr p6, [sp, #5, mul vl]
+# ASM-NEXT: ldr p5, [sp, #6, mul vl]
+# ASM-NEXT: ldr p4, [sp, #7, mul vl]
+# ASM-NEXT: addvl sp, sp, #1
+# ASM-NEXT: .cfi_def_cfa wsp, 16
+# ASM-NEXT: ldr x29, [sp], #16
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w29
+
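+# Worked example for the VG-scaled register saves above (illustrative): with
+# VG = 2, "$d8 @ cfa - 16 * VG - 1040" places the d8 save at cfa - 1072. The
+# d8-d10 saves use VG-scaled offsets because the FPR callee saves are promoted
+# to ZPR saves in the split-SVE layout (see the file header comment).
+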
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2096, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus
+#
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104
+# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105
+# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg106
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+
+name: save_restore_ppr_zpr
+stack:
+ - { id: 0, stack-id: default, size: 32, alignment: 16 }
+body: |
+ bb.0.entry:
+
+ $p4 = IMPLICIT_DEF
+ $p5 = IMPLICIT_DEF
+ $p6 = IMPLICIT_DEF
+ $z8 = IMPLICIT_DEF
+ $z9 = IMPLICIT_DEF
+ $z10 = IMPLICIT_DEF
+
+ RET_ReallyLR
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 03a6aab..1101416 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -1215,19 +1215,19 @@ body: |
# CHECK: - { id: 2, name: '', type: default, offset: -112, size: 16, alignment: 16,
# CHECK-NEXT: stack-id: scalable-vector,
# CHECK: - { id: 3, name: '', type: default, offset: -114, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector,
+# CHECK-NEXT: stack-id: scalable-predicate-vector,
# CHECK: - { id: 4, name: '', type: spill-slot, offset: -144, size: 16, alignment: 16,
# CHECK-NEXT: stack-id: scalable-vector,
# CHECK: - { id: 5, name: '', type: spill-slot, offset: -146, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector,
+# CHECK-NEXT: stack-id: scalable-predicate-vector,
# CHECK: - { id: 6, name: '', type: spill-slot, offset: -16, size: 16, alignment: 16,
# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$z8',
# CHECK: - { id: 7, name: '', type: spill-slot, offset: -32, size: 16, alignment: 16,
# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$z23',
# CHECK: - { id: 8, name: '', type: spill-slot, offset: -34, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$p4',
+# CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '$p4',
# CHECK: - { id: 9, name: '', type: spill-slot, offset: -36, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$p15',
+# CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '$p15',
# CHECK: - { id: 10, name: '', type: spill-slot, offset: -16, size: 8, alignment: 16,
# CHECK-NEXT: stack-id: default, callee-saved-register: '$fp',
#
@@ -1295,9 +1295,9 @@ stack:
- { id: 0, type: default, size: 32, alignment: 16, stack-id: scalable-vector }
- { id: 1, type: default, size: 4, alignment: 2, stack-id: scalable-vector }
- { id: 2, type: default, size: 16, alignment: 16, stack-id: scalable-vector }
- - { id: 3, type: default, size: 2, alignment: 2, stack-id: scalable-vector }
+ - { id: 3, type: default, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
- { id: 4, type: spill-slot, size: 16, alignment: 16, stack-id: scalable-vector }
- - { id: 5, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-vector }
+ - { id: 5, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/AArch64/freeze.ll b/llvm/test/CodeGen/AArch64/freeze.ll
index fae3bbe..fb909fe 100644
--- a/llvm/test/CodeGen/AArch64/freeze.ll
+++ b/llvm/test/CodeGen/AArch64/freeze.ll
@@ -466,15 +466,12 @@ define <8 x i16> @freeze_urhadd(<8 x i16> %a0, <8 x i16> %a1) {
ret <8 x i16> %masked
}
-; TODO: Unnecessary sext_inreg
define <8 x i16> @freeze_shadd(<8 x i8> %a0, <8 x i16> %a1) {
; CHECK-LABEL: freeze_shadd:
; CHECK: // %bb.0:
; CHECK-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-NEXT: sshr v1.8h, v1.8h, #8
; CHECK-NEXT: shadd v0.8h, v0.8h, v1.8h
-; CHECK-NEXT: shl v0.8h, v0.8h, #8
-; CHECK-NEXT: sshr v0.8h, v0.8h, #8
; CHECK-NEXT: ret
%x0 = sext <8 x i8> %a0 to <8 x i16>
%x1 = ashr <8 x i16> %a1, splat (i16 8)
@@ -485,15 +482,12 @@ define <8 x i16> @freeze_shadd(<8 x i8> %a0, <8 x i16> %a1) {
ret <8 x i16> %sext
}
-; TODO: Unnecessary sext_inreg
define <8 x i16> @freeze_srhadd(<8 x i8> %a0, <8 x i16> %a1) {
; CHECK-LABEL: freeze_srhadd:
; CHECK: // %bb.0:
; CHECK-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-NEXT: sshr v1.8h, v1.8h, #8
; CHECK-NEXT: srhadd v0.8h, v0.8h, v1.8h
-; CHECK-NEXT: shl v0.8h, v0.8h, #8
-; CHECK-NEXT: sshr v0.8h, v0.8h, #8
; CHECK-NEXT: ret
%x0 = sext <8 x i8> %a0 to <8 x i16>
%x1 = ashr <8 x i16> %a1, splat (i16 8)
diff --git a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll
index b89f551..e2c861b 100644
--- a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll
+++ b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll
@@ -327,9 +327,6 @@ define void @test_2x8bit_mask_with_extracts_and_ptest(i64 %i, i64 %n) {
; CHECK-SVE2p1-SME2-LABEL: test_2x8bit_mask_with_extracts_and_ptest:
; CHECK-SVE2p1-SME2: // %bb.0: // %entry
; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.h, p1.h }, x0, x1
-; CHECK-SVE2p1-SME2-NEXT: ptrue p2.b
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p3.b, p0.b, p1.b
-; CHECK-SVE2p1-SME2-NEXT: ptest p2, p3.b
; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB11_2
; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then
; CHECK-SVE2p1-SME2-NEXT: b use
@@ -368,9 +365,6 @@ define void @test_2x8bit_mask_with_extracts_and_reinterpret_casts(i64 %i, i64 %n
; CHECK-SVE2p1-SME2-LABEL: test_2x8bit_mask_with_extracts_and_reinterpret_casts:
; CHECK-SVE2p1-SME2: // %bb.0: // %entry
; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.s, p1.s }, x0, x1
-; CHECK-SVE2p1-SME2-NEXT: ptrue p2.h
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p3.h, p0.h, p1.h
-; CHECK-SVE2p1-SME2-NEXT: ptest p2, p3.b
; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB12_2
; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then
; CHECK-SVE2p1-SME2-NEXT: b use
@@ -413,14 +407,9 @@ define void @test_4x4bit_mask_with_extracts_and_ptest(i64 %i, i64 %n) {
; CHECK-SVE2p1-SME2-NEXT: adds x8, x0, x8
; CHECK-SVE2p1-SME2-NEXT: csinv x8, x8, xzr, lo
; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.s, p1.s }, x0, x1
-; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.s, p3.s }, x8, x1
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p4.h, p0.h, p1.h
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p5.h, p2.h, p3.h
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p4.b, p4.b, p5.b
-; CHECK-SVE2p1-SME2-NEXT: ptrue p5.b
-; CHECK-SVE2p1-SME2-NEXT: ptest p5, p4.b
; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB13_2
; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then
+; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.s, p3.s }, x8, x1
; CHECK-SVE2p1-SME2-NEXT: b use
; CHECK-SVE2p1-SME2-NEXT: .LBB13_2: // %if.end
; CHECK-SVE2p1-SME2-NEXT: ret
@@ -463,14 +452,9 @@ define void @test_4x2bit_mask_with_extracts_and_reinterpret_casts(i64 %i, i64 %n
; CHECK-SVE2p1-SME2-NEXT: adds x8, x0, x8
; CHECK-SVE2p1-SME2-NEXT: csinv x8, x8, xzr, lo
; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.d, p1.d }, x0, x1
-; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.d, p3.d }, x8, x1
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p4.s, p0.s, p1.s
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p5.s, p2.s, p3.s
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p4.h, p4.h, p5.h
-; CHECK-SVE2p1-SME2-NEXT: ptrue p5.h
-; CHECK-SVE2p1-SME2-NEXT: ptest p5, p4.b
; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB14_2
; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then
+; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.d, p3.d }, x8, x1
; CHECK-SVE2p1-SME2-NEXT: b use
; CHECK-SVE2p1-SME2-NEXT: .LBB14_2: // %if.end
; CHECK-SVE2p1-SME2-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/llvm.modf.ll b/llvm/test/CodeGen/AArch64/llvm.modf.ll
index 41fe796..503742f 100644
--- a/llvm/test/CodeGen/AArch64/llvm.modf.ll
+++ b/llvm/test/CodeGen/AArch64/llvm.modf.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc -mtriple=aarch64-gnu-linux -global-isel < %s | FileCheck -check-prefixes=CHECK,CHECK-GI %s
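+; The -global-isel RUN line covers the GlobalISel lowering, which also lowers
+; llvm.modf.* to modff/modf libcalls but emits a different instruction
+; sequence (see the CHECK-GI lines below).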
define { half, half } @test_modf_f16(half %a) {
; CHECK-LABEL: test_modf_f16:
@@ -55,61 +56,95 @@ define half @test_modf_f16_only_use_integral_part(half %a) {
}
define { <2 x half>, <2 x half> } @test_modf_v2f16(<2 x half> %a) {
-; CHECK-LABEL: test_modf_v2f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov h1, v0.h[1]
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #44
-; CHECK-NEXT: fcvt s0, h1
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcvt h0, s0
-; CHECK-NEXT: add x0, sp, #40
-; CHECK-NEXT: fcvt s1, h1
-; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: fmov s0, s1
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcvt h2, s0
-; CHECK-NEXT: add x0, sp, #56
-; CHECK-NEXT: mov h1, v1.h[2]
-; CHECK-NEXT: fcvt s0, h1
-; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov v2.h[1], v1.h[0]
-; CHECK-NEXT: str q2, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcvt h2, s0
-; CHECK-NEXT: add x0, sp, #60
-; CHECK-NEXT: mov h1, v1.h[3]
-; CHECK-NEXT: fcvt s0, h1
-; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov v1.h[2], v2.h[0]
-; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldp s2, s1, [sp, #40]
-; CHECK-NEXT: fcvt h4, s0
-; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
-; CHECK-NEXT: fcvt h3, s1
-; CHECK-NEXT: fcvt h1, s2
-; CHECK-NEXT: ldr s2, [sp, #56]
-; CHECK-NEXT: mov v0.h[3], v4.h[0]
-; CHECK-NEXT: fcvt h2, s2
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: mov v1.h[1], v3.h[0]
-; CHECK-NEXT: ldr s3, [sp, #60]
-; CHECK-NEXT: mov v1.h[2], v2.h[0]
-; CHECK-NEXT: fcvt h2, s3
-; CHECK-NEXT: mov v1.h[3], v2.h[0]
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: add sp, sp, #64
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_modf_v2f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #64
+; CHECK-SD-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: mov h1, v0.h[1]
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #44
+; CHECK-SD-NEXT: fcvt s0, h1
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcvt h0, s0
+; CHECK-SD-NEXT: add x0, sp, #40
+; CHECK-SD-NEXT: fcvt s1, h1
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: fmov s0, s1
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcvt h2, s0
+; CHECK-SD-NEXT: add x0, sp, #56
+; CHECK-SD-NEXT: mov h1, v1.h[2]
+; CHECK-SD-NEXT: fcvt s0, h1
+; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v2.h[1], v1.h[0]
+; CHECK-SD-NEXT: str q2, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcvt h2, s0
+; CHECK-SD-NEXT: add x0, sp, #60
+; CHECK-SD-NEXT: mov h1, v1.h[3]
+; CHECK-SD-NEXT: fcvt s0, h1
+; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v1.h[2], v2.h[0]
+; CHECK-SD-NEXT: str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldp s2, s1, [sp, #40]
+; CHECK-SD-NEXT: fcvt h4, s0
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-SD-NEXT: fcvt h3, s1
+; CHECK-SD-NEXT: fcvt h1, s2
+; CHECK-SD-NEXT: ldr s2, [sp, #56]
+; CHECK-SD-NEXT: mov v0.h[3], v4.h[0]
+; CHECK-SD-NEXT: fcvt h2, s2
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-SD-NEXT: ldr s3, [sp, #60]
+; CHECK-SD-NEXT: mov v1.h[2], v2.h[0]
+; CHECK-SD-NEXT: fcvt h2, s3
+; CHECK-SD-NEXT: mov v1.h[3], v2.h[0]
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: add sp, sp, #64
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_modf_v2f16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #64
+; CHECK-GI-NEXT: str d8, [sp, #48] // 8-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #56] // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: .cfi_offset w30, -8
+; CHECK-GI-NEXT: .cfi_offset b8, -16
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov h8, v0.h[1]
+; CHECK-GI-NEXT: add x0, sp, #40
+; CHECK-GI-NEXT: fcvt s0, h0
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: fcvt h0, s0
+; CHECK-GI-NEXT: ldr s1, [sp, #40]
+; CHECK-GI-NEXT: add x0, sp, #44
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fcvt h0, s1
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fcvt s0, h8
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: ldr s1, [sp, #44]
+; CHECK-GI-NEXT: fcvt h3, s0
+; CHECK-GI-NEXT: ldr x30, [sp, #56] // 8-byte Folded Reload
+; CHECK-GI-NEXT: ldr d8, [sp, #48] // 8-byte Folded Reload
+; CHECK-GI-NEXT: fcvt h2, s1
+; CHECK-GI-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: add sp, sp, #64
+; CHECK-GI-NEXT: ret
%result = call { <2 x half>, <2 x half> } @llvm.modf.v2f16(<2 x half> %a)
ret { <2 x half>, <2 x half> } %result
}
@@ -130,80 +165,156 @@ define { float, float } @test_modf_f32(float %a) {
}
define { <3 x float>, <3 x float> } @test_modf_v3f32(<3 x float> %a) {
-; CHECK-LABEL: test_modf_v3f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #80
-; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: mov s0, v0.s[1]
-; CHECK-NEXT: add x0, sp, #56
-; CHECK-NEXT: add x19, sp, #56
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #44
-; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: add x0, sp, #60
-; CHECK-NEXT: add x20, sp, #60
-; CHECK-NEXT: mov v0.s[1], v1.s[0]
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov s0, v0.s[2]
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr s1, [sp, #44]
-; CHECK-NEXT: ldr q2, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
-; CHECK-NEXT: ld1 { v1.s }[1], [x19]
-; CHECK-NEXT: mov v2.s[2], v0.s[0]
-; CHECK-NEXT: ld1 { v1.s }[2], [x20]
-; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: mov v0.16b, v2.16b
-; CHECK-NEXT: add sp, sp, #80
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_modf_v3f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #80
+; CHECK-SD-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 80
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: mov s0, v0.s[1]
+; CHECK-SD-NEXT: add x0, sp, #56
+; CHECK-SD-NEXT: add x19, sp, #56
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #44
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: add x0, sp, #60
+; CHECK-SD-NEXT: add x20, sp, #60
+; CHECK-SD-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov s0, v0.s[2]
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr s1, [sp, #44]
+; CHECK-SD-NEXT: ldr q2, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-SD-NEXT: ld1 { v1.s }[1], [x19]
+; CHECK-SD-NEXT: mov v2.s[2], v0.s[0]
+; CHECK-SD-NEXT: ld1 { v1.s }[2], [x20]
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v0.16b, v2.16b
+; CHECK-SD-NEXT: add sp, sp, #80
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_modf_v3f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x30, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NEXT: .cfi_offset b8, -24
+; CHECK-GI-NEXT: .cfi_offset b9, -32
+; CHECK-GI-NEXT: add x0, sp, #68
+; CHECK-GI-NEXT: mov s8, v0.s[1]
+; CHECK-GI-NEXT: mov s9, v0.s[2]
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: ldr s1, [sp, #68]
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: add x0, sp, #72
+; CHECK-GI-NEXT: stp q0, q1, [sp, #32] // 32-byte Folded Spill
+; CHECK-GI-NEXT: fmov s0, s8
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: add x0, sp, #76
+; CHECK-GI-NEXT: add x19, sp, #76
+; CHECK-GI-NEXT: ldr s0, [sp, #72]
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov s0, s9
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: ldp q3, q2, [sp, #16] // 32-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v2.s[1], v1.s[0]
+; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v1.s[1], v3.s[0]
+; CHECK-GI-NEXT: mov v2.s[2], v0.s[0]
+; CHECK-GI-NEXT: ld1 { v1.s }[2], [x19]
+; CHECK-GI-NEXT: ldp x30, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-GI-NEXT: add sp, sp, #112
+; CHECK-GI-NEXT: ret
%result = call { <3 x float>, <3 x float> } @llvm.modf.v3f32(<3 x float> %a)
ret { <3 x float>, <3 x float> } %result
}
define { <2 x float>, <2 x float> } @test_modf_v2f32(<2 x float> %a) {
-; CHECK-LABEL: test_modf_v2f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #40
-; CHECK-NEXT: add x19, sp, #40
-; CHECK-NEXT: mov s0, v0.s[1]
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #44
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr s1, [sp, #44]
-; CHECK-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: ld1 { v1.s }[1], [x19]
-; CHECK-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: mov v0.s[1], v2.s[0]
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #64
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_modf_v2f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #64
+; CHECK-SD-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #40
+; CHECK-SD-NEXT: add x19, sp, #40
+; CHECK-SD-NEXT: mov s0, v0.s[1]
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #44
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr s1, [sp, #44]
+; CHECK-SD-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: ld1 { v1.s }[1], [x19]
+; CHECK-SD-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v0.s[1], v2.s[0]
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: add sp, sp, #64
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_modf_v2f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #64
+; CHECK-GI-NEXT: str d8, [sp, #32] // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NEXT: .cfi_offset b8, -32
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: add x0, sp, #40
+; CHECK-GI-NEXT: mov s8, v0.s[1]
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: add x0, sp, #44
+; CHECK-GI-NEXT: add x19, sp, #44
+; CHECK-GI-NEXT: ldr s0, [sp, #40]
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov s0, s8
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: ldp q2, q1, [sp] // 32-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: ldr d8, [sp, #32] // 8-byte Folded Reload
+; CHECK-GI-NEXT: mov v2.s[1], v0.s[0]
+; CHECK-GI-NEXT: ld1 { v1.s }[1], [x19]
+; CHECK-GI-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: fmov d0, d2
+; CHECK-GI-NEXT: add sp, sp, #64
+; CHECK-GI-NEXT: ret
%result = call { <2 x float>, <2 x float> } @llvm.modf.v2f32(<2 x float> %a)
ret { <2 x float>, <2 x float> } %result
}
@@ -224,32 +335,80 @@ define { double, double } @test_modf_f64(double %a) {
}
define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) {
-; CHECK-LABEL: test_modf_v2f64:
+; CHECK-SD-LABEL: test_modf_v2f64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #64
+; CHECK-SD-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: mov d0, v0.d[1]
+; CHECK-SD-NEXT: add x0, sp, #32
+; CHECK-SD-NEXT: add x19, sp, #32
+; CHECK-SD-NEXT: bl modf
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #40
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: bl modf
+; CHECK-SD-NEXT: ldr d1, [sp, #40]
+; CHECK-SD-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: ld1 { v1.d }[1], [x19]
+; CHECK-SD-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
+; CHECK-SD-NEXT: add sp, sp, #64
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_modf_v2f64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #80
+; CHECK-GI-NEXT: str d8, [sp, #48] // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x30, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 80
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NEXT: .cfi_offset b8, -32
+; CHECK-GI-NEXT: add x0, sp, #40
+; CHECK-GI-NEXT: mov d8, v0.d[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: bl modf
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: add x0, sp, #56
+; CHECK-GI-NEXT: add x19, sp, #56
+; CHECK-GI-NEXT: ldr d0, [sp, #40]
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov d0, d8
+; CHECK-GI-NEXT: bl modf
+; CHECK-GI-NEXT: ldp q2, q1, [sp] // 32-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: ldr d8, [sp, #48] // 8-byte Folded Reload
+; CHECK-GI-NEXT: mov v2.d[1], v0.d[0]
+; CHECK-GI-NEXT: ld1 { v1.d }[1], [x19]
+; CHECK-GI-NEXT: ldp x30, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-GI-NEXT: add sp, sp, #80
+; CHECK-GI-NEXT: ret
+ %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+define { fp128, fp128 } @test_modf_fp128(fp128 %a) {
+; CHECK-LABEL: test_modf_fp128:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: mov d0, v0.d[1]
-; CHECK-NEXT: add x0, sp, #32
-; CHECK-NEXT: add x19, sp, #32
-; CHECK-NEXT: bl modf
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #40
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: bl modf
-; CHECK-NEXT: ldr d1, [sp, #40]
-; CHECK-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: ld1 { v1.d }[1], [x19]
-; CHECK-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: mov x0, sp
+; CHECK-NEXT: bl modfl
+; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
- %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a)
- ret { <2 x double>, <2 x double> } %result
+ %result = call { fp128, fp128 } @llvm.modf.fp128(fp128 %a)
+ ret { fp128, fp128 } %result
}
diff --git a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
index d60c870..4287507 100644
--- a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
@@ -1257,21 +1257,55 @@ entry:
}
define <4 x i32> @partial_reduce_shl_sext_const_rhs6(<16 x i8> %l, <4 x i32> %part) {
-; CHECK-COMMON-LABEL: partial_reduce_shl_sext_const_rhs6:
+; CHECK-NODOT-LABEL: partial_reduce_shl_sext_const_rhs6:
+; CHECK-NODOT: // %bb.0:
+; CHECK-NODOT-NEXT: sshll v2.8h, v0.8b, #0
+; CHECK-NODOT-NEXT: sshll2 v0.8h, v0.16b, #0
+; CHECK-NODOT-NEXT: sshll v3.4s, v0.4h, #6
+; CHECK-NODOT-NEXT: sshll2 v4.4s, v2.8h, #6
+; CHECK-NODOT-NEXT: sshll v2.4s, v2.4h, #6
+; CHECK-NODOT-NEXT: sshll2 v0.4s, v0.8h, #6
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NODOT-NEXT: add v2.4s, v4.4s, v3.4s
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NODOT-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NODOT-NEXT: ret
+;
+; CHECK-DOT-LABEL: partial_reduce_shl_sext_const_rhs6:
+; CHECK-DOT: // %bb.0:
+; CHECK-DOT-NEXT: movi v2.16b, #64
+; CHECK-DOT-NEXT: sdot v1.4s, v0.16b, v2.16b
+; CHECK-DOT-NEXT: mov v0.16b, v1.16b
+; CHECK-DOT-NEXT: ret
+;
+; CHECK-DOT-I8MM-LABEL: partial_reduce_shl_sext_const_rhs6:
+; CHECK-DOT-I8MM: // %bb.0:
+; CHECK-DOT-I8MM-NEXT: movi v2.16b, #64
+; CHECK-DOT-I8MM-NEXT: sdot v1.4s, v0.16b, v2.16b
+; CHECK-DOT-I8MM-NEXT: mov v0.16b, v1.16b
+; CHECK-DOT-I8MM-NEXT: ret
+ %ext = sext <16 x i8> %l to <16 x i32>
+ %shift = shl nsw <16 x i32> %ext, splat (i32 6)
+ %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift)
+ ret <4 x i32> %red
+}
+
+define <4 x i32> @partial_reduce_shl_sext_const_rhs7(<16 x i8> %l, <4 x i32> %part) {
+; CHECK-COMMON-LABEL: partial_reduce_shl_sext_const_rhs7:
; CHECK-COMMON: // %bb.0:
; CHECK-COMMON-NEXT: sshll v2.8h, v0.8b, #0
; CHECK-COMMON-NEXT: sshll2 v0.8h, v0.16b, #0
-; CHECK-COMMON-NEXT: sshll v3.4s, v0.4h, #6
-; CHECK-COMMON-NEXT: sshll2 v4.4s, v2.8h, #6
-; CHECK-COMMON-NEXT: sshll v2.4s, v2.4h, #6
-; CHECK-COMMON-NEXT: sshll2 v0.4s, v0.8h, #6
+; CHECK-COMMON-NEXT: sshll v3.4s, v0.4h, #7
+; CHECK-COMMON-NEXT: sshll2 v4.4s, v2.8h, #7
+; CHECK-COMMON-NEXT: sshll v2.4s, v2.4h, #7
+; CHECK-COMMON-NEXT: sshll2 v0.4s, v0.8h, #7
; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s
; CHECK-COMMON-NEXT: add v2.4s, v4.4s, v3.4s
; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s
; CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-COMMON-NEXT: ret
%ext = sext <16 x i8> %l to <16 x i32>
- %shift = shl nsw <16 x i32> %ext, splat (i32 6)
+ %shift = shl nsw <16 x i32> %ext, splat (i32 7)
%red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift)
ret <4 x i32> %red
}
@@ -1331,19 +1365,33 @@ define <4 x i32> @partial_reduce_shl_sext_non_const_rhs(<16 x i8> %l, <4 x i32>
}
define <4 x i32> @partial_reduce_shl_zext_const_rhs6(<16 x i8> %l, <4 x i32> %part) {
-; CHECK-COMMON-LABEL: partial_reduce_shl_zext_const_rhs6:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-COMMON-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-COMMON-NEXT: ushll v3.4s, v0.4h, #6
-; CHECK-COMMON-NEXT: ushll2 v4.4s, v2.8h, #6
-; CHECK-COMMON-NEXT: ushll v2.4s, v2.4h, #6
-; CHECK-COMMON-NEXT: ushll2 v0.4s, v0.8h, #6
-; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-COMMON-NEXT: add v2.4s, v4.4s, v3.4s
-; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-COMMON-NEXT: ret
+; CHECK-NODOT-LABEL: partial_reduce_shl_zext_const_rhs6:
+; CHECK-NODOT: // %bb.0:
+; CHECK-NODOT-NEXT: ushll v2.8h, v0.8b, #0
+; CHECK-NODOT-NEXT: ushll2 v0.8h, v0.16b, #0
+; CHECK-NODOT-NEXT: ushll v3.4s, v0.4h, #6
+; CHECK-NODOT-NEXT: ushll2 v4.4s, v2.8h, #6
+; CHECK-NODOT-NEXT: ushll v2.4s, v2.4h, #6
+; CHECK-NODOT-NEXT: ushll2 v0.4s, v0.8h, #6
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NODOT-NEXT: add v2.4s, v4.4s, v3.4s
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NODOT-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NODOT-NEXT: ret
+;
+; CHECK-DOT-LABEL: partial_reduce_shl_zext_const_rhs6:
+; CHECK-DOT: // %bb.0:
+; CHECK-DOT-NEXT: movi v2.16b, #64
+; CHECK-DOT-NEXT: udot v1.4s, v0.16b, v2.16b
+; CHECK-DOT-NEXT: mov v0.16b, v1.16b
+; CHECK-DOT-NEXT: ret
+;
+; CHECK-DOT-I8MM-LABEL: partial_reduce_shl_zext_const_rhs6:
+; CHECK-DOT-I8MM: // %bb.0:
+; CHECK-DOT-I8MM-NEXT: movi v2.16b, #64
+; CHECK-DOT-I8MM-NEXT: udot v1.4s, v0.16b, v2.16b
+; CHECK-DOT-I8MM-NEXT: mov v0.16b, v1.16b
+; CHECK-DOT-I8MM-NEXT: ret
%ext = zext <16 x i8> %l to <16 x i32>
%shift = shl nsw <16 x i32> %ext, splat (i32 6)
%red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift)
diff --git a/llvm/test/CodeGen/AArch64/pr161420.ll b/llvm/test/CodeGen/AArch64/pr161420.ll
new file mode 100644
index 0000000..dcdf0ed
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr161420.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "arm64-apple-macosx15.0.0"
+
+; From: https://github.com/llvm/llvm-project/issues/161420. This test checks that
+; two `luti4` instructions are emitted.
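+; ZT0 is reloaded (@llvm.aarch64.sme.ldr.zt) between the two lookups, so the
+; otherwise-identical luti4 calls read different table contents and must not
+; be merged into one instruction.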
+define void @pluto(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3) #0 {
+; CHECK-LABEL: pluto:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: mov w8, #0 ; =0x0
+; CHECK-NEXT: ldr zt0, [x1]
+; CHECK-NEXT: ldr z4, [x3]
+; CHECK-NEXT: ptrue pn8.h
+; CHECK-NEXT: ld1h { z0.h - z3.h }, pn8/z, [x0]
+; CHECK-NEXT: luti4 { z16.h - z19.h }, zt0, z4[0]
+; CHECK-NEXT: fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, { z16.h - z19.h }
+; CHECK-NEXT: ldr zt0, [x2]
+; CHECK-NEXT: luti4 { z4.h - z7.h }, zt0, z4[0]
+; CHECK-NEXT: fmla za.h[w8, 2, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
+; CHECK-NEXT: ret
+bb:
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %arg1)
+ %load = load <vscale x 16 x i8>, ptr %arg3, align 16
+ %call = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c16()
+ %call4 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %call, ptr %arg)
+ %extractvalue = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 0
+ %extractvalue5 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 1
+ %extractvalue6 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 2
+ %extractvalue7 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 3
+ %call8 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, <vscale x 16 x i8> %load, i32 0)
+ %extractvalue9 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 0
+ %extractvalue10 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 1
+ %extractvalue11 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 2
+ %extractvalue12 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 3
+ tail call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 0, <vscale x 8 x half> %extractvalue, <vscale x 8 x half> %extractvalue5, <vscale x 8 x half> %extractvalue6, <vscale x 8 x half> %extractvalue7, <vscale x 8 x half> %extractvalue9, <vscale x 8 x half> %extractvalue10, <vscale x 8 x half> %extractvalue11, <vscale x 8 x half> %extractvalue12)
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %arg2)
+ %call13 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, <vscale x 16 x i8> %load, i32 0)
+ %extractvalue14 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 0
+ %extractvalue15 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 1
+ %extractvalue16 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 2
+ %extractvalue17 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 3
+ tail call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 2, <vscale x 8 x half> %extractvalue, <vscale x 8 x half> %extractvalue5, <vscale x 8 x half> %extractvalue6, <vscale x 8 x half> %extractvalue7, <vscale x 8 x half> %extractvalue14, <vscale x 8 x half> %extractvalue15, <vscale x 8 x half> %extractvalue16, <vscale x 8 x half> %extractvalue17)
+ ret void
+}
+
+declare void @llvm.aarch64.sme.ldr.zt(i32, ptr)
+declare target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c16()
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount"), ptr)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 immarg, <vscale x 16 x i8>, i32 immarg)
+declare void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+
+attributes #0 = { mustprogress nofree noinline norecurse nosync nounwind ssp willreturn uwtable(sync) "aarch64_inout_za" "aarch64_inout_zt0" "aarch64_pstate_sm_enabled" "target-cpu"="apple-m1" "target-features"="+fp-armv8,+lse,+neon,+sme,+sme-f16f16,+sme2,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a" }
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll
index 92d3e11..d48e0cd 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll
@@ -48,6 +48,27 @@ define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscal
ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
+; Tests that multiple identical luti4 intrinsics with ZT0 loads interspersed are not CSE'd.
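+; The ldr zt0 between the two calls overwrites the lookup table, so the second
+; luti4 reads a different ZT0 value and cannot reuse the first result.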
+define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, <vscale x 16 x i8> %x) {
+; CHECK-LABEL: test_multiple_luti4_zt_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr zt0, [x0]
+; CHECK-NEXT: luti4 { z4.s - z7.s }, zt0, z0[1]
+; CHECK-NEXT: // fake_use: $z4 $z4_z5_z6_z7
+; CHECK-NEXT: ldr zt0, [x1]
+; CHECK-NEXT: luti4 { z0.s - z3.s }, zt0, z0[1]
+; CHECK-NEXT: // fake_use: $z0 $z0_z1_z2_z3
+; CHECK-NEXT: ret
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA)
+ %res1 = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1)
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB)
+ %res2 = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1)
+
+ call void (...) @llvm.fake.use({<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res1)
+ call void (...) @llvm.fake.use({<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res2)
+ ret void
+}
+
declare {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8i16(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4i32(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8bf16(i32, <vscale x 16 x i8>, i32)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll
index 778f311..c1eff8d 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll
@@ -14,4 +14,27 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %res
}
+; Tests that multiple identical luti4 intrinsics with ZT0 loads interspersed are not CSE'd.
+define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1) #0 {
+; CHECK-LABEL: test_multiple_luti4_zt_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr zt0, [x0]
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: luti4 { z4.b - z7.b }, zt0, { z0, z1 }
+; CHECK-NEXT: // fake_use: $z4 $z4_z5_z6_z7
+; CHECK-NEXT: ldr zt0, [x1]
+; CHECK-NEXT: luti4 { z0.b - z3.b }, zt0, { z0, z1 }
+; CHECK-NEXT: // fake_use: $z0 $z0_z1_z2_z3
+; CHECK-NEXT: ret
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA)
+ %res1 = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.zt.x4.nxv16i8(i32 0, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1)
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB)
+ %res2 = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.zt.x4.nxv16i8(i32 0, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1)
+
+ call void (...) @llvm.fake.use({ <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res1)
+ call void (...) @llvm.fake.use({ <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res2)
+ ret void
+}
+
attributes #0 = { "target-features"="+sme2,+sme-lutv2"}
diff --git a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
index bff0cac..0298168 100644
--- a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
+++ b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
@@ -983,26 +983,22 @@ body: |
; EXPAND-LABEL: name: zpr_predicate_spill_p4_saved
; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p8, $p4
; EXPAND-NEXT: {{ $}}
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.3)
+ ; EXPAND-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.2)
+ ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.1)
; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.1)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+ ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.0)
;
; EXPAND-NEXT: $p8 = IMPLICIT_DEF
;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.2)
+ ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.1)
; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg
; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.1)
+ ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.0)
; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.3)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+ ; EXPAND-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3
; If we spill a register above p8, p4 must also be saved, so we can guarantee
diff --git a/llvm/test/CodeGen/AArch64/spillfill-sve.mir b/llvm/test/CodeGen/AArch64/spillfill-sve.mir
index 2b16dd0f..5569175 100644
--- a/llvm/test/CodeGen/AArch64/spillfill-sve.mir
+++ b/llvm/test/CodeGen/AArch64/spillfill-sve.mir
@@ -39,7 +39,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_ppr
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_ppr
; EXPAND: STR_PXI $p0, $sp, 7
@@ -82,7 +82,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_ppr2
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_ppr2
; EXPAND: STR_PXI $p0, $sp, 6
@@ -127,7 +127,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_ppr2
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_ppr2mul2
; EXPAND: STR_PXI $p0, $sp, 6
@@ -172,7 +172,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_pnr
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_pnr
; EXPAND: STR_PXI $pn0, $sp, 7
@@ -211,7 +211,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_virtreg_pnr
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_virtreg_pnr
; EXPAND: renamable $pn8 = WHILEGE_CXX_B
diff --git a/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll
new file mode 100644
index 0000000..690a39d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll
@@ -0,0 +1,824 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -pass-remarks-analysis=stack-frame-layout 2>&1 >/dev/null | FileCheck %s --check-prefixes=CHECK-FRAMELAYOUT
+
+; CHECK-FRAMELAYOUT-LABEL: Function: zpr_and_ppr_local
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024
+
+; <GPRs>
+; %ppr_local sp+2048+30*vscale (= #15, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding sp+2048+16*vscale
+; <hazard padding> sp+1024+16*vscale
+; %zpr_local sp+1024
+; <hazard padding>
+; -> sp
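+; Note: predicate str/ldr scale their immediate by 2*vscale bytes, so
+; "str p0, [x8, #15, mul vl]" with x8 = sp+2048 stores to sp+2048+30*vscale,
+; matching the %ppr_local offset above.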
+define void @zpr_and_ppr_local(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: zpr_and_ppr_local:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: add x8, sp, #2048
+; CHECK-NEXT: str p0, [x8, #15, mul vl]
+; CHECK-NEXT: add x8, sp, #1024
+; CHECK-NEXT: str z0, [x8]
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: zpr_and_ppr_local_fp
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024
+
+; <GPRs>
+; -> fp
+; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding fp-16*vscale
+; <hazard padding> fp-1024-16*vscale
+; %zpr_local fp-1024-32*vscale (= #-2, mul vl for str/ldr ZPR)
+; <hazard padding>
+; -> sp
+define void @zpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
+; CHECK-LABEL: zpr_and_ppr_local_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: sub x8, x29, #1024
+; CHECK-NEXT: str p0, [x29, #-1, mul vl]
+; CHECK-NEXT: str z0, [x8, #-2, mul vl]
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: fpr_and_ppr_local
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-16 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-16 x vscale], Type: Variable, Align: 16, Size: 1024
+
+; <GPRs>
+; %ppr_local sp+2064+14*vscale (= #7, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding sp+2064
+; <hazard padding> sp+1040
+; %fpr_local sp+1032
+; 8 bytes of padding sp+1024
+; <hazard padding>
+; -> sp
+define void @fpr_and_ppr_local(<vscale x 16 x i1> %pred, double %double) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: fpr_and_ppr_local:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: add x8, sp, #2064
+; CHECK-NEXT: str p0, [x8, #7, mul vl]
+; CHECK-NEXT: str d0, [sp, #1032]
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %fpr_local = alloca double
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile double %double, ptr %fpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: fpr_and_ppr_local_fp
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-16 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-16 x vscale], Type: Variable, Align: 16, Size: 1024
+
+; <GPRs>
+; -> fp
+; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding
+; <hazard padding>
+; %fpr_local sp+1032
+; 8 bytes of padding sp+1024
+; <hazard padding>
+; -> sp
+define void @fpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, double %double) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
+; CHECK-LABEL: fpr_and_ppr_local_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: str p0, [x29, #-1, mul vl]
+; CHECK-NEXT: str d0, [sp, #1032]
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %fpr_local = alloca double
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile double %double, ptr %fpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: gpr_and_ppr_local
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2072-32 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; %ppr_local sp+2064+30*vscale (= #15, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding
+; <hazard padding> sp+1040+16*vscale
+; <fpr callee save: z8> sp+1040
+; <hazard padding> sp+16
+; %gpr_local sp+8
+; 8 bytes of padding
+; -> sp
+define void @gpr_and_ppr_local(<vscale x 16 x i1> %pred, i64 %int) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: gpr_and_ppr_local:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2080 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040
+; CHECK-NEXT: add x8, sp, #2064
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: str p0, [x8, #15, mul vl]
+; CHECK-NEXT: str x0, [sp, #8]
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ tail call void asm sideeffect "", "~{d8}"() #1 ; Spill an FPR so hazard padding is needed
+ %ppr_local = alloca <vscale x 16 x i1>
+ %gpr_local = alloca i64
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile i64 %int, ptr %gpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: gpr_and_ppr_local_fp
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2072-32 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; -> fp
+; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding
+; <hazard padding>
+; <fpr callee save: z8>
+; <hazard padding>
+; %gpr_local sp+8
+; 8 bytes of padding
+; -> sp
+define void @gpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, i64 %int) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
+; CHECK-LABEL: gpr_and_ppr_local_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: str p0, [x29, #-1, mul vl]
+; CHECK-NEXT: str x0, [sp, #8]
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ tail call void asm sideeffect "", "~{d8}"() #1 ; Spill an FPR so hazard padding is needed
+ %ppr_local = alloca <vscale x 16 x i1>
+ %gpr_local = alloca i64
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile i64 %int, ptr %gpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: all_stack_areas
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-34 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-304 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-320 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-320 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-320 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2088-320 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; <CS PPRs>
+; %ppr_local sp+2080+286*vscale (addvl #17, addpl #7)
+; 14 * vscale bytes of padding sp+2080+272*vscale
+; <hazard padding> sp+1056+272*vscale
+; <CS ZPRs> sp+1056+16*vscale
+; %zpr_local sp+1056
+; %fpr_local sp+1048
+; 8 bytes of padding sp+1040
+; <hazard padding> sp+16
+; %gpr_local sp+8
+; 8 bytes of padding sp
+; -> sp
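+; Note: addvl steps by 16*vscale bytes and addpl by 2*vscale, so
+; (addvl #17, addpl #7) = (17*16 + 7*2)*vscale = 286*vscale; equivalently, the
+; predicate store "#143, mul vl" below is 143*2*vscale = 286*vscale.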
+define void @all_stack_areas(<vscale x 16 x i1> %pred, double %fp) {
+; CHECK-LABEL: all_stack_areas:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-17
+; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 2096 + 160 * VG
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 32 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 40 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d10 @ cfa - 48 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d11 @ cfa - 56 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d12 @ cfa - 64 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d13 @ cfa - 72 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d14 @ cfa - 80 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d15 @ cfa - 88 * VG - 1040
+; CHECK-NEXT: add x0, sp, #2080
+; CHECK-NEXT: add x8, sp, #2080
+; CHECK-NEXT: add x1, sp, #1056
+; CHECK-NEXT: addvl x0, x0, #17
+; CHECK-NEXT: add x2, sp, #1048
+; CHECK-NEXT: add x3, sp, #8
+; CHECK-NEXT: addpl x0, x0, #7
+; CHECK-NEXT: str d0, [sp, #1048]
+; CHECK-NEXT: str p0, [x8, #143, mul vl]
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: add sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #17
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %fpr_local = alloca double
+ ; Needed to sort %fpr_local into the FPR region
+ store double %fp, ptr %fpr_local
+ ; Needed to sort %ppr_local into the PPR region
+ store <vscale x 16 x i1> %pred, ptr %ppr_local
+ %gpr_local = alloca i64
+ call void @foo(ptr %ppr_local, ptr %zpr_local, ptr %fpr_local, ptr %gpr_local)
+ ret void
+}
+declare void @foo(ptr, ptr, ptr, ptr)
+
+; CHECK-FRAMELAYOUT-LABEL: Function: all_stack_areas_fp
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-34 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-304 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-320 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1064-320 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2096-320 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2104-320 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; -> fp
+; <CS PPRs> fp-32*vscale
+; %ppr_local fp-34*vscale (addpl #-17)
+; 14 * vscale bytes of padding fp-48*vscale
+; <hazard padding> fp-1024-48*vscale
+; <CS ZPRs> fp-1024-304*vscale
+; %zpr_local fp-1024-320*vscale (addvl #-20)
+; %fpr_local sp+1048
+; 8 bytes of padding sp+1040
+; <hazard padding> sp+16
+; %gpr_local sp+8
+; 8 bytes of padding sp
+; -> sp
+define void @all_stack_areas_fp(<vscale x 16 x i1> %pred, double %fp) "frame-pointer"="all" {
+; CHECK-LABEL: all_stack_areas_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x28, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-17
+; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w28, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d8 @ cfa - 32 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d9 @ cfa - 40 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d10 @ cfa - 48 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d11 @ cfa - 56 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d12 @ cfa - 64 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d13 @ cfa - 72 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d14 @ cfa - 80 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d15 @ cfa - 88 * VG - 1056
+; CHECK-NEXT: sub x1, x29, #1024
+; CHECK-NEXT: addpl x0, x29, #-17
+; CHECK-NEXT: add x2, sp, #1048
+; CHECK-NEXT: addvl x1, x1, #-20
+; CHECK-NEXT: add x3, sp, #8
+; CHECK-NEXT: str d0, [sp, #1048]
+; CHECK-NEXT: str p0, [x29, #-17, mul vl]
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: add sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #17
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x28, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %fpr_local = alloca double
+ ; Needed to sort %fpr_local into the FPR region
+ store double %fp, ptr %fpr_local
+ ; Needed to sort %ppr_local into the PPR region
+ store <vscale x 16 x i1> %pred, ptr %ppr_local
+ %gpr_local = alloca i64
+ call void @foo(ptr %ppr_local, ptr %zpr_local, ptr %fpr_local, ptr %gpr_local)
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: svecc_call
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-56], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-48 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2112-288 x vscale], Type: Variable, Align: 16, Size: 1024
+
+define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: svecc_call:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: cntd x9
+; CHECK-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: .cfi_def_cfa w29, 64
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w26, -16
+; CHECK-NEXT: .cfi_offset w27, -24
+; CHECK-NEXT: .cfi_offset w28, -32
+; CHECK-NEXT: .cfi_offset vg, -48
+; CHECK-NEXT: .cfi_offset w30, -56
+; CHECK-NEXT: .cfi_offset w29, -64
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-16
+; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: mov x8, x0
+; CHECK-NEXT: bl __arm_sme_state
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: tbz w19, #0, .LBB8_2
+; CHECK-NEXT: // %bb.1: // %entry
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: .LBB8_2: // %entry
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: mov w1, #45 // =0x2d
+; CHECK-NEXT: mov w2, #37 // =0x25
+; CHECK-NEXT: bl memset
+; CHECK-NEXT: tbz w19, #0, .LBB8_4
+; CHECK-NEXT: // %bb.3: // %entry
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: .LBB8_4: // %entry
+; CHECK-NEXT: mov w0, #22647 // =0x5877
+; CHECK-NEXT: movk w0, #59491, lsl #16
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #16
+; CHECK-NEXT: .cfi_restore z8
+; CHECK-NEXT: .cfi_restore z9
+; CHECK-NEXT: .cfi_restore z10
+; CHECK-NEXT: .cfi_restore z11
+; CHECK-NEXT: .cfi_restore z12
+; CHECK-NEXT: .cfi_restore z13
+; CHECK-NEXT: .cfi_restore z14
+; CHECK-NEXT: .cfi_restore z15
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: .cfi_def_cfa wsp, 64
+; CHECK-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w19
+; CHECK-NEXT: .cfi_restore w26
+; CHECK-NEXT: .cfi_restore w27
+; CHECK-NEXT: .cfi_restore w28
+; CHECK-NEXT: .cfi_restore vg
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .cfi_restore w29
+; CHECK-NEXT: ret
+entry:
+ tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
+ %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
+ ret i32 -396142473
+}
+declare ptr @memset(ptr, i32, i32)
+
+; FIXME: aarch64-split-sve-objects is currently not supported in this function
+; as it requires stack realignment (for the 32-byte aligned alloca); a minimal
+; sketch of the realigning pattern follows this function.
+; GPR CSRs
+; <hazard padding>
+; FPR CSRs
+; <hazard padding>
+; <SVE locals (PPRs and ZPRs)> <--- hazard between PPRs and ZPRs here!
+; <realignment padding>
+; -> sp
+define void @zpr_and_ppr_local_realignment(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector, i64 %gpr) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: zpr_and_ppr_local_realignment:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: sub x9, sp, #1040
+; CHECK-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK-NEXT: add x29, sp, #1024
+; CHECK-NEXT: addvl x9, x9, #-2
+; CHECK-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: sub x8, x29, #1024
+; CHECK-NEXT: str p0, [x8, #-1, mul vl]
+; CHECK-NEXT: str z0, [x8, #-2, mul vl]
+; CHECK-NEXT: str x0, [sp]
+; CHECK-NEXT: sub sp, x29, #1024
+; CHECK-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %gpr_local = alloca i64, align 32
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ store volatile i64 %gpr, ptr %gpr_local
+ ret void
+}
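+;
+; A minimal sketch (illustrative only, not part of the generated checks) of
+; the pattern that forces realignment; the function and value names here are
+; hypothetical, and the usual 16-byte stack alignment is assumed:
+;
+;   define void @realign_sketch(<vscale x 16 x i1> %pred) {
+;     %overaligned = alloca i64, align 32     ; alignment > 16 forces sp realignment
+;     %ppr_local = alloca <vscale x 16 x i1>  ; SVE local in the same frame
+;     store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+;     store volatile i64 0, ptr %overaligned
+;     ret void
+;   }
+;
+; Because sp must be masked down to a 32-byte boundary (the "and sp, x9, ..."
+; above) after the SVE area is allocated, the split PPR/ZPR layout cannot
+; currently be formed here.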
+
+define void @zpr_and_ppr_local_stack_probing(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector, i64 %gpr)
+; CHECK-LABEL: zpr_and_ppr_local_stack_probing:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str xzr, [sp]
+; CHECK-NEXT: sub sp, sp, #1824
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str xzr, [sp]
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xb0, 0x16, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2864 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: add x8, sp, #2848
+; CHECK-NEXT: str p0, [x8, #15, mul vl]
+; CHECK-NEXT: add x8, sp, #1824
+; CHECK-NEXT: str z0, [x8]
+; CHECK-NEXT: str x0, [sp]
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1824
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ "probe-stack"="inline-asm" "stack-probe-size"="4096" "frame-pointer"="none" "aarch64_pstate_sm_compatible"
+{
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %gpr_local = alloca i64, i64 100, align 8
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ store volatile i64 %gpr, ptr %gpr_local
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index 5f52280..333a8be 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=0 | FileCheck %s --check-prefixes=CHECK,CHECK0
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=64 | FileCheck %s --check-prefixes=CHECK,CHECK64
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-NOSPLITSVE
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-split-sve-objects -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-SPLITSVE
define i32 @basic(i32 noundef %num) {
; CHECK-LABEL: basic:
@@ -1503,72 +1504,24 @@ define [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1([2 x <vscale x 4 x i1>
}
define [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1_caller([2 x <vscale x 4 x i1>] %arg1, [2 x <vscale x 4 x i1>] %arg2) nounwind "aarch64_pstate_sm_compatible" {
-; CHECK0-LABEL: sve_signature_pred_2xv4i1_caller:
-; CHECK0: // %bb.0:
-; CHECK0-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK0-NEXT: addvl sp, sp, #-1
-; CHECK0-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK0-NEXT: mov p5.b, p0.b
-; CHECK0-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK0-NEXT: mov p4.b, p1.b
-; CHECK0-NEXT: mov p0.b, p2.b
-; CHECK0-NEXT: mov p1.b, p3.b
-; CHECK0-NEXT: mov p2.b, p5.b
-; CHECK0-NEXT: mov p3.b, p4.b
-; CHECK0-NEXT: bl sve_signature_pred_2xv4i1
-; CHECK0-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK0-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK0-NEXT: addvl sp, sp, #1
-; CHECK0-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK0-NEXT: ret
-;
-; CHECK64-LABEL: sve_signature_pred_2xv4i1_caller:
-; CHECK64: // %bb.0:
-; CHECK64-NEXT: sub sp, sp, #80
-; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
-; CHECK64-NEXT: addvl sp, sp, #-1
-; CHECK64-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK64-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK64-NEXT: sub sp, sp, #64
-; CHECK64-NEXT: mov p4.b, p1.b
-; CHECK64-NEXT: mov p5.b, p0.b
-; CHECK64-NEXT: mov p0.b, p2.b
-; CHECK64-NEXT: mov p1.b, p3.b
-; CHECK64-NEXT: mov p2.b, p5.b
-; CHECK64-NEXT: mov p3.b, p4.b
-; CHECK64-NEXT: bl sve_signature_pred_2xv4i1
-; CHECK64-NEXT: add sp, sp, #64
-; CHECK64-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK64-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK64-NEXT: addvl sp, sp, #1
-; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
-; CHECK64-NEXT: add sp, sp, #80
-; CHECK64-NEXT: ret
-;
-; CHECK1024-LABEL: sve_signature_pred_2xv4i1_caller:
-; CHECK1024: // %bb.0:
-; CHECK1024-NEXT: sub sp, sp, #1040
-; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
-; CHECK1024-NEXT: addvl sp, sp, #-1
-; CHECK1024-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: sub sp, sp, #1024
-; CHECK1024-NEXT: mov p4.b, p1.b
-; CHECK1024-NEXT: mov p5.b, p0.b
-; CHECK1024-NEXT: mov p0.b, p2.b
-; CHECK1024-NEXT: mov p1.b, p3.b
-; CHECK1024-NEXT: mov p2.b, p5.b
-; CHECK1024-NEXT: mov p3.b, p4.b
-; CHECK1024-NEXT: bl sve_signature_pred_2xv4i1
-; CHECK1024-NEXT: add sp, sp, #1024
-; CHECK1024-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: addvl sp, sp, #1
-; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
-; CHECK1024-NEXT: add sp, sp, #1040
-; CHECK1024-NEXT: ret
+; CHECK-LABEL: sve_signature_pred_2xv4i1_caller:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p5.b, p0.b
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p4.b, p1.b
+; CHECK-NEXT: mov p0.b, p2.b
+; CHECK-NEXT: mov p1.b, p3.b
+; CHECK-NEXT: mov p2.b, p5.b
+; CHECK-NEXT: mov p3.b, p4.b
+; CHECK-NEXT: bl sve_signature_pred_2xv4i1
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
%res = call [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1([2 x <vscale x 4 x i1>] %arg2, [2 x <vscale x 4 x i1>] %arg1)
ret [2 x <vscale x 4 x i1>] %res
}
@@ -2113,139 +2066,269 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK64-NEXT: .cfi_restore w29
; CHECK64-NEXT: ret
;
-; CHECK1024-LABEL: svecc_call:
-; CHECK1024: // %bb.0: // %entry
-; CHECK1024-NEXT: sub sp, sp, #1088
-; CHECK1024-NEXT: .cfi_def_cfa_offset 1088
-; CHECK1024-NEXT: cntd x9
-; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
-; CHECK1024-NEXT: add x29, sp, #1024
-; CHECK1024-NEXT: .cfi_def_cfa w29, 64
-; CHECK1024-NEXT: .cfi_offset w19, -16
-; CHECK1024-NEXT: .cfi_offset w26, -24
-; CHECK1024-NEXT: .cfi_offset w27, -32
-; CHECK1024-NEXT: .cfi_offset w28, -40
-; CHECK1024-NEXT: .cfi_offset vg, -48
-; CHECK1024-NEXT: .cfi_offset w30, -56
-; CHECK1024-NEXT: .cfi_offset w29, -64
-; CHECK1024-NEXT: addvl sp, sp, #-18
-; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088
-; CHECK1024-NEXT: sub sp, sp, #1024
-; CHECK1024-NEXT: mov x8, x0
-; CHECK1024-NEXT: bl __arm_sme_state
-; CHECK1024-NEXT: mov x19, x0
-; CHECK1024-NEXT: //APP
-; CHECK1024-NEXT: //NO_APP
-; CHECK1024-NEXT: tbz w19, #0, .LBB28_2
-; CHECK1024-NEXT: // %bb.1: // %entry
-; CHECK1024-NEXT: smstop sm
-; CHECK1024-NEXT: .LBB28_2: // %entry
-; CHECK1024-NEXT: mov x0, x8
-; CHECK1024-NEXT: mov w1, #45 // =0x2d
-; CHECK1024-NEXT: mov w2, #37 // =0x25
-; CHECK1024-NEXT: bl memset
-; CHECK1024-NEXT: tbz w19, #0, .LBB28_4
-; CHECK1024-NEXT: // %bb.3: // %entry
-; CHECK1024-NEXT: smstart sm
-; CHECK1024-NEXT: .LBB28_4: // %entry
-; CHECK1024-NEXT: mov w0, #22647 // =0x5877
-; CHECK1024-NEXT: movk w0, #59491, lsl #16
-; CHECK1024-NEXT: add sp, sp, #1024
-; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: addvl sp, sp, #18
-; CHECK1024-NEXT: .cfi_restore z8
-; CHECK1024-NEXT: .cfi_restore z9
-; CHECK1024-NEXT: .cfi_restore z10
-; CHECK1024-NEXT: .cfi_restore z11
-; CHECK1024-NEXT: .cfi_restore z12
-; CHECK1024-NEXT: .cfi_restore z13
-; CHECK1024-NEXT: .cfi_restore z14
-; CHECK1024-NEXT: .cfi_restore z15
-; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088
-; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
-; CHECK1024-NEXT: add sp, sp, #1088
-; CHECK1024-NEXT: .cfi_def_cfa_offset 0
-; CHECK1024-NEXT: .cfi_restore w19
-; CHECK1024-NEXT: .cfi_restore w26
-; CHECK1024-NEXT: .cfi_restore w27
-; CHECK1024-NEXT: .cfi_restore w28
-; CHECK1024-NEXT: .cfi_restore vg
-; CHECK1024-NEXT: .cfi_restore w30
-; CHECK1024-NEXT: .cfi_restore w29
-; CHECK1024-NEXT: ret
+; CHECK1024-NOSPLITSVE-LABEL: svecc_call:
+; CHECK1024-NOSPLITSVE: // %bb.0: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 1088
+; CHECK1024-NOSPLITSVE-NEXT: cntd x9
+; CHECK1024-NOSPLITSVE-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: add x29, sp, #1024
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w19, -16
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w26, -24
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w27, -32
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w28, -40
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset vg, -48
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w30, -56
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w29, -64
+; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #-18
+; CHECK1024-NOSPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1024
+; CHECK1024-NOSPLITSVE-NEXT: mov x8, x0
+; CHECK1024-NOSPLITSVE-NEXT: bl __arm_sme_state
+; CHECK1024-NOSPLITSVE-NEXT: mov x19, x0
+; CHECK1024-NOSPLITSVE-NEXT: //APP
+; CHECK1024-NOSPLITSVE-NEXT: //NO_APP
+; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB28_2
+; CHECK1024-NOSPLITSVE-NEXT: // %bb.1: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: smstop sm
+; CHECK1024-NOSPLITSVE-NEXT: .LBB28_2: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: mov x0, x8
+; CHECK1024-NOSPLITSVE-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-NOSPLITSVE-NEXT: mov w2, #37 // =0x25
+; CHECK1024-NOSPLITSVE-NEXT: bl memset
+; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB28_4
+; CHECK1024-NOSPLITSVE-NEXT: // %bb.3: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: smstart sm
+; CHECK1024-NOSPLITSVE-NEXT: .LBB28_4: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-NOSPLITSVE-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1024
+; CHECK1024-NOSPLITSVE-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #18
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z8
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z9
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z10
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z11
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z12
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z13
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z14
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z15
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa wsp, 1088
+; CHECK1024-NOSPLITSVE-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w19
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w26
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w27
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w28
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore vg
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w30
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w29
+; CHECK1024-NOSPLITSVE-NEXT: ret
+;
+; CHECK1024-SPLITSVE-LABEL: svecc_call:
+; CHECK1024-SPLITSVE: // %bb.0: // %entry
+; CHECK1024-SPLITSVE-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 64
+; CHECK1024-SPLITSVE-NEXT: cntd x9
+; CHECK1024-SPLITSVE-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: mov x29, sp
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w19, -8
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w26, -16
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w27, -24
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w28, -32
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset vg, -48
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w30, -56
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w29, -64
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-2
+; CHECK1024-SPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-16
+; CHECK1024-SPLITSVE-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: mov x8, x0
+; CHECK1024-SPLITSVE-NEXT: bl __arm_sme_state
+; CHECK1024-SPLITSVE-NEXT: mov x19, x0
+; CHECK1024-SPLITSVE-NEXT: //APP
+; CHECK1024-SPLITSVE-NEXT: //NO_APP
+; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB28_2
+; CHECK1024-SPLITSVE-NEXT: // %bb.1: // %entry
+; CHECK1024-SPLITSVE-NEXT: smstop sm
+; CHECK1024-SPLITSVE-NEXT: .LBB28_2: // %entry
+; CHECK1024-SPLITSVE-NEXT: mov x0, x8
+; CHECK1024-SPLITSVE-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-SPLITSVE-NEXT: mov w2, #37 // =0x25
+; CHECK1024-SPLITSVE-NEXT: bl memset
+; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB28_4
+; CHECK1024-SPLITSVE-NEXT: // %bb.3: // %entry
+; CHECK1024-SPLITSVE-NEXT: smstart sm
+; CHECK1024-SPLITSVE-NEXT: .LBB28_4: // %entry
+; CHECK1024-SPLITSVE-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-SPLITSVE-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #16
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z8
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z9
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z10
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z11
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z12
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z13
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z14
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z15
+; CHECK1024-SPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #2
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa wsp, 64
+; CHECK1024-SPLITSVE-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w19
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w26
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w27
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w28
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore vg
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w30
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w29
+; CHECK1024-SPLITSVE-NEXT: ret
entry:
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
%call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
@@ -2505,138 +2588,267 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK64-NEXT: .cfi_restore w29
; CHECK64-NEXT: ret
;
-; CHECK1024-LABEL: svecc_alloca_call:
-; CHECK1024: // %bb.0: // %entry
-; CHECK1024-NEXT: sub sp, sp, #1088
-; CHECK1024-NEXT: .cfi_def_cfa_offset 1088
-; CHECK1024-NEXT: cntd x9
-; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
-; CHECK1024-NEXT: add x29, sp, #1024
-; CHECK1024-NEXT: .cfi_def_cfa w29, 64
-; CHECK1024-NEXT: .cfi_offset w19, -16
-; CHECK1024-NEXT: .cfi_offset w26, -24
-; CHECK1024-NEXT: .cfi_offset w27, -32
-; CHECK1024-NEXT: .cfi_offset w28, -40
-; CHECK1024-NEXT: .cfi_offset vg, -48
-; CHECK1024-NEXT: .cfi_offset w30, -56
-; CHECK1024-NEXT: .cfi_offset w29, -64
-; CHECK1024-NEXT: addvl sp, sp, #-18
-; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088
-; CHECK1024-NEXT: sub sp, sp, #1072
-; CHECK1024-NEXT: bl __arm_sme_state
-; CHECK1024-NEXT: mov x19, x0
-; CHECK1024-NEXT: //APP
-; CHECK1024-NEXT: //NO_APP
-; CHECK1024-NEXT: tbz w19, #0, .LBB29_2
-; CHECK1024-NEXT: // %bb.1: // %entry
-; CHECK1024-NEXT: smstop sm
-; CHECK1024-NEXT: .LBB29_2: // %entry
-; CHECK1024-NEXT: mov x0, sp
-; CHECK1024-NEXT: mov w1, #45 // =0x2d
-; CHECK1024-NEXT: mov w2, #37 // =0x25
-; CHECK1024-NEXT: bl memset
-; CHECK1024-NEXT: tbz w19, #0, .LBB29_4
-; CHECK1024-NEXT: // %bb.3: // %entry
-; CHECK1024-NEXT: smstart sm
-; CHECK1024-NEXT: .LBB29_4: // %entry
-; CHECK1024-NEXT: mov w0, #22647 // =0x5877
-; CHECK1024-NEXT: movk w0, #59491, lsl #16
-; CHECK1024-NEXT: add sp, sp, #1072
-; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: addvl sp, sp, #18
-; CHECK1024-NEXT: .cfi_restore z8
-; CHECK1024-NEXT: .cfi_restore z9
-; CHECK1024-NEXT: .cfi_restore z10
-; CHECK1024-NEXT: .cfi_restore z11
-; CHECK1024-NEXT: .cfi_restore z12
-; CHECK1024-NEXT: .cfi_restore z13
-; CHECK1024-NEXT: .cfi_restore z14
-; CHECK1024-NEXT: .cfi_restore z15
-; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088
-; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
-; CHECK1024-NEXT: add sp, sp, #1088
-; CHECK1024-NEXT: .cfi_def_cfa_offset 0
-; CHECK1024-NEXT: .cfi_restore w19
-; CHECK1024-NEXT: .cfi_restore w26
-; CHECK1024-NEXT: .cfi_restore w27
-; CHECK1024-NEXT: .cfi_restore w28
-; CHECK1024-NEXT: .cfi_restore vg
-; CHECK1024-NEXT: .cfi_restore w30
-; CHECK1024-NEXT: .cfi_restore w29
-; CHECK1024-NEXT: ret
+; CHECK1024-NOSPLITSVE-LABEL: svecc_alloca_call:
+; CHECK1024-NOSPLITSVE: // %bb.0: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 1088
+; CHECK1024-NOSPLITSVE-NEXT: cntd x9
+; CHECK1024-NOSPLITSVE-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: add x29, sp, #1024
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w19, -16
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w26, -24
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w27, -32
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w28, -40
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset vg, -48
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w30, -56
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w29, -64
+; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #-18
+; CHECK1024-NOSPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1072
+; CHECK1024-NOSPLITSVE-NEXT: bl __arm_sme_state
+; CHECK1024-NOSPLITSVE-NEXT: mov x19, x0
+; CHECK1024-NOSPLITSVE-NEXT: //APP
+; CHECK1024-NOSPLITSVE-NEXT: //NO_APP
+; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB29_2
+; CHECK1024-NOSPLITSVE-NEXT: // %bb.1: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: smstop sm
+; CHECK1024-NOSPLITSVE-NEXT: .LBB29_2: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: mov x0, sp
+; CHECK1024-NOSPLITSVE-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-NOSPLITSVE-NEXT: mov w2, #37 // =0x25
+; CHECK1024-NOSPLITSVE-NEXT: bl memset
+; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB29_4
+; CHECK1024-NOSPLITSVE-NEXT: // %bb.3: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: smstart sm
+; CHECK1024-NOSPLITSVE-NEXT: .LBB29_4: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-NOSPLITSVE-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1072
+; CHECK1024-NOSPLITSVE-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #18
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z8
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z9
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z10
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z11
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z12
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z13
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z14
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z15
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa wsp, 1088
+; CHECK1024-NOSPLITSVE-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w19
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w26
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w27
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w28
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore vg
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w30
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w29
+; CHECK1024-NOSPLITSVE-NEXT: ret
+;
+; CHECK1024-SPLITSVE-LABEL: svecc_alloca_call:
+; CHECK1024-SPLITSVE: // %bb.0: // %entry
+; CHECK1024-SPLITSVE-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 64
+; CHECK1024-SPLITSVE-NEXT: cntd x9
+; CHECK1024-SPLITSVE-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: mov x29, sp
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w19, -8
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w26, -16
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w27, -24
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w28, -32
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset vg, -48
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w30, -56
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w29, -64
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-2
+; CHECK1024-SPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-16
+; CHECK1024-SPLITSVE-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1072
+; CHECK1024-SPLITSVE-NEXT: bl __arm_sme_state
+; CHECK1024-SPLITSVE-NEXT: mov x19, x0
+; CHECK1024-SPLITSVE-NEXT: //APP
+; CHECK1024-SPLITSVE-NEXT: //NO_APP
+; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB29_2
+; CHECK1024-SPLITSVE-NEXT: // %bb.1: // %entry
+; CHECK1024-SPLITSVE-NEXT: smstop sm
+; CHECK1024-SPLITSVE-NEXT: .LBB29_2: // %entry
+; CHECK1024-SPLITSVE-NEXT: mov x0, sp
+; CHECK1024-SPLITSVE-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-SPLITSVE-NEXT: mov w2, #37 // =0x25
+; CHECK1024-SPLITSVE-NEXT: bl memset
+; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB29_4
+; CHECK1024-SPLITSVE-NEXT: // %bb.3: // %entry
+; CHECK1024-SPLITSVE-NEXT: smstart sm
+; CHECK1024-SPLITSVE-NEXT: .LBB29_4: // %entry
+; CHECK1024-SPLITSVE-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-SPLITSVE-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1072
+; CHECK1024-SPLITSVE-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #16
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z8
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z9
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z10
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z11
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z12
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z13
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z14
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z15
+; CHECK1024-SPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #2
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa wsp, 64
+; CHECK1024-SPLITSVE-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w19
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w26
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w27
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w28
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore vg
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w30
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w29
+; CHECK1024-SPLITSVE-NEXT: ret
entry:
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
index 7bddd1d..cc63c7f 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -56,9 +56,9 @@ define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_many_svepred_arg(<
; CHECK: name: caller_with_many_svepred_arg
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector
+; CHECK-NEXT: stack-id: scalable-predicate-vector
; CHECK: - { id: 1, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector
+; CHECK-NEXT: stack-id: scalable-predicate-vector
; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.0, 0
; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.1, 0
; CHECK-DAG: [[BASE1:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0
@@ -90,7 +90,7 @@ define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_svepred_arg_1xv16i
; CHECK: name: caller_with_svepred_arg_1xv16i1_4xv16i1
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK-NEXT: stack-id: scalable-predicate-vector,
; CHECK: [[PRED0:%[0-9]+]]:ppr = COPY $p0
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
; CHECK: STR_PXI [[PRED0]], %stack.0, 0 :: (store (<vscale x 1 x s16>) into %stack.0)
@@ -139,7 +139,7 @@ define [4 x <vscale x 16 x i1>] @caller_with_svepred_arg_4xv16i1_4xv16i1([4 x <v
; CHECK: name: caller_with_svepred_arg_4xv16i1_4xv16i1
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK-NEXT: stack-id: scalable-predicate-vector,
; CHECK: [[PRED3:%[0-9]+]]:ppr = COPY $p3
; CHECK: [[PRED2:%[0-9]+]]:ppr = COPY $p2
; CHECK: [[PRED1:%[0-9]+]]:ppr = COPY $p1
@@ -200,7 +200,7 @@ define [2 x <vscale x 32 x i1>] @caller_with_svepred_arg_2xv32i1_1xv16i1([2 x <v
; CHECK: name: caller_with_svepred_arg_2xv32i1_1xv16i1
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK-NEXT: stack-id: scalable-predicate-vector,
; CHECK: [[PRED3:%[0-9]+]]:ppr = COPY $p3
; CHECK: [[PRED2:%[0-9]+]]:ppr = COPY $p2
; CHECK: [[PRED1:%[0-9]+]]:ppr = COPY $p1
diff --git a/llvm/test/CodeGen/AArch64/sve-load-store-legalisation.ll b/llvm/test/CodeGen/AArch64/sve-load-store-legalisation.ll
new file mode 100644
index 0000000..584753b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-load-store-legalisation.ll
@@ -0,0 +1,2854 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mattr=+sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
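+; A brief orientation comment for readers of this new test file (a summary of
+; what the autogenerated checks below demonstrate, not part of the generated
+; output): loads and stores of scalable vector types whose element counts are
+; not a power of two are legalised either into predicated ld1/st1 operations
+; whose governing predicate comes from ptrue or from a whilelo over an
+; rdvl-derived element count, or, for the remaining sizes, split across
+; multiple register-sized parts and repacked with uzp1/uunpk shuffles around
+; full-register ldr/str.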
+define void @sve_load_store_nxv1i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i8>, ptr %a
+ store <vscale x 1 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i8>, ptr %a
+ store <vscale x 2 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i8>, ptr %a
+ store <vscale x 3 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i8>, ptr %a
+ store <vscale x 4 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i8>, ptr %a
+ store <vscale x 5 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1b { z1.s }, p1, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1b { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i8>, ptr %a
+ store <vscale x 6 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i8>, ptr %a
+ store <vscale x 7 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i8>, ptr %a
+ store <vscale x 8 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv9i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x i8>, ptr %a
+ store <vscale x 9 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv10i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv10i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: st1b { z0.h }, p1, [x1]
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1b { z1.d }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x i8>, ptr %a
+ store <vscale x 10 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv11i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv11i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x i8>, ptr %a
+ store <vscale x 11 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv12i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv12i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: st1b { z0.h }, p1, [x1]
+; CHECK-NEXT: st1b { z1.s }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x i8>, ptr %a
+ store <vscale x 12 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv13i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv13i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x i8>, ptr %a
+ store <vscale x 13 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv14i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv14i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ptrue p2.h
+; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: ld1b { z1.h }, p2/z, [x0]
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z2.s, z1.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: st1b { z0.h }, p2, [x1]
+; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: st1b { z1.s }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: st1b { z2.d }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x i8>, ptr %a
+ store <vscale x 14 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv15i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv15i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x i8>, ptr %a
+ store <vscale x 15 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv16i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x i8>, ptr %a
+ store <vscale x 16 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv17i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv17i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #17 // =0x11
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 17 x i8>, ptr %a
+ store <vscale x 17 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv18i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv18i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z0.s, z1.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpklo z2.s, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: uzp1 z2.s, z0.s, z2.s
+; CHECK-NEXT: uzp1 z1.h, z2.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpklo z2.s, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: uzp1 z2.s, z2.s, z0.s
+; CHECK-NEXT: uzp1 z1.h, z2.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpkhi z2.s, z1.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: uzp1 z2.s, z0.s, z2.s
+; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpkhi z2.s, z1.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: uzp1 z2.s, z2.s, z0.s
+; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1b { z0.d }, p0, [x1, x8]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 18 x i8>, ptr %a
+ store <vscale x 18 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv19i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv19i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #19 // =0x13
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 19 x i8>, ptr %a
+ store <vscale x 19 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv20i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv20i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uzp1 z1.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: st1b { z0.s }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 20 x i8>, ptr %a
+ store <vscale x 20 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv21i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv21i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #21 // =0x15
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 21 x i8>, ptr %a
+ store <vscale x 21 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv22i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv22i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: cntw x8, all, mul #5
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1b { z1.d }, p1/z, [x0, x8]
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uzp1 z1.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1b { z1.d }, p1, [x1, x8]
+; CHECK-NEXT: st1b { z0.s }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 22 x i8>, ptr %a
+ store <vscale x 22 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv23i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv23i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #23 // =0x17
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 23 x i8>, ptr %a
+ store <vscale x 23 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv24i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv24i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: st1b { z0.h }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 24 x i8>, ptr %a
+ store <vscale x 24 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv25i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv25i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #25 // =0x19
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 25 x i8>, ptr %a
+ store <vscale x 25 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv26i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv26i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cnth x8, all, mul #3
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8]
+; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1b { z1.d }, p0, [x1, x8]
+; CHECK-NEXT: st1b { z0.h }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 26 x i8>, ptr %a
+ store <vscale x 26 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv27i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv27i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #27 // =0x1b
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 27 x i8>, ptr %a
+ store <vscale x 27 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv28i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv28i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: st1b { z0.h }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: st1b { z1.s }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 28 x i8>, ptr %a
+ store <vscale x 28 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv29i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv29i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #29 // =0x1d
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 29 x i8>, ptr %a
+ store <vscale x 29 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv30i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv30i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cntw x8, all, mul #7
+; CHECK-NEXT: ldr z3, [x0]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8]
+; CHECK-NEXT: ptrue p2.h
+; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1b { z2.h }, p2/z, [x0, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z2.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z2.s, z1.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: st1b { z2.d }, p0, [x1, x8]
+; CHECK-NEXT: st1b { z0.h }, p2, [x1, #2, mul vl]
+; CHECK-NEXT: st1b { z1.s }, p1, [x1, #6, mul vl]
+; CHECK-NEXT: str z3, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 30 x i8>, ptr %a
+ store <vscale x 30 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv31i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv31i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #31 // =0x1f
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 31 x i8>, ptr %a
+ store <vscale x 31 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv32i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv32i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 32 x i8>, ptr %a
+ store <vscale x 32 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv1i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i16>, ptr %a
+ store <vscale x 1 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i16>, ptr %a
+ store <vscale x 2 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i16>, ptr %a
+ store <vscale x 3 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i16>, ptr %a
+ store <vscale x 4 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i16>, ptr %a
+ store <vscale x 5 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1h { z0.s }, p1, [x1]
+; CHECK-NEXT: st1h { z1.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i16>, ptr %a
+ store <vscale x 6 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i16>, ptr %a
+ store <vscale x 7 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i16>, ptr %a
+ store <vscale x 8 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv9i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv9i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x i16>, ptr %a
+ store <vscale x 9 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv10i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv10i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z1.h, z0.h, z0.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z0.s, z1.s
+; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x i16>, ptr %a
+ store <vscale x 10 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv11i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv11i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x i16>, ptr %a
+ store <vscale x 11 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv12i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv12i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: st1h { z0.s }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x i16>, ptr %a
+ store <vscale x 12 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv13i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv13i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x i16>, ptr %a
+ store <vscale x 13 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv14i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv14i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1h { z0.s }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: st1h { z1.d }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x i16>, ptr %a
+ store <vscale x 14 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv15i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv15i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x i16>, ptr %a
+ store <vscale x 15 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv16i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x i16>, ptr %a
+ store <vscale x 16 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv1i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i32>, ptr %a
+ store <vscale x 1 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i32>, ptr %a
+ store <vscale x 2 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i32>, ptr %a
+ store <vscale x 3 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i32>, ptr %a
+ store <vscale x 4 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i32>, ptr %a
+ store <vscale x 5 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1w { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i32>, ptr %a
+ store <vscale x 6 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i32>, ptr %a
+ store <vscale x 7 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i32>, ptr %a
+ store <vscale x 8 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv1i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i64>, ptr %a
+ store <vscale x 1 x i64> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i64>, ptr %a
+ store <vscale x 2 x i64> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1d { z0.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i64>, ptr %a
+ store <vscale x 3 x i64> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i64>, ptr %a
+ store <vscale x 4 x i64> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv1f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x half>, ptr %a
+ store <vscale x 1 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x half>, ptr %a
+ store <vscale x 2 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x half>, ptr %a
+ store <vscale x 3 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x half>, ptr %a
+ store <vscale x 4 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x half>, ptr %a
+ store <vscale x 5 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1h { z1.s }, p1, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x half>, ptr %a
+ store <vscale x 6 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x half>, ptr %a
+ store <vscale x 7 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x half>, ptr %a
+ store <vscale x 8 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv9f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv9f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x half>, ptr %a
+ store <vscale x 9 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv10f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv10f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1h { z1.d }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x half>, ptr %a
+ store <vscale x 10 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv11f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv11f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x half>, ptr %a
+ store <vscale x 11 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv12f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv12f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1h { z1.s }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x half>, ptr %a
+ store <vscale x 12 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv13f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv13f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x half>, ptr %a
+ store <vscale x 13 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv14f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv14f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1h { z1.s }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x half>, ptr %a
+ store <vscale x 14 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv15f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv15f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x half>, ptr %a
+ store <vscale x 15 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv16f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv16f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x half>, ptr %a
+ store <vscale x 16 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv1f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x float>, ptr %a
+ store <vscale x 1 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x float>, ptr %a
+ store <vscale x 2 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x float>, ptr %a
+ store <vscale x 3 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x float>, ptr %a
+ store <vscale x 4 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x float>, ptr %a
+ store <vscale x 5 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1w { z1.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x float>, ptr %a
+ store <vscale x 6 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x float>, ptr %a
+ store <vscale x 7 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x float>, ptr %a
+ store <vscale x 8 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv1f64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x double>, ptr %a
+ store <vscale x 1 x double> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2f64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x double>, ptr %a
+ store <vscale x 2 x double> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3f64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1d { z0.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x double>, ptr %a
+ store <vscale x 3 x double> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4f64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x double>, ptr %a
+ store <vscale x 4 x double> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv1bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x bfloat>, ptr %a
+ store <vscale x 1 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x bfloat>, ptr %a
+ store <vscale x 2 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x bfloat>, ptr %a
+ store <vscale x 3 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x bfloat>, ptr %a
+ store <vscale x 4 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x bfloat>, ptr %a
+ store <vscale x 5 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1h { z1.s }, p1, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x bfloat>, ptr %a
+ store <vscale x 6 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x bfloat>, ptr %a
+ store <vscale x 7 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x bfloat>, ptr %a
+ store <vscale x 8 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv9bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv9bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x bfloat>, ptr %a
+ store <vscale x 9 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv10bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv10bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1h { z1.d }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x bfloat>, ptr %a
+ store <vscale x 10 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv11bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv11bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x bfloat>, ptr %a
+ store <vscale x 11 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv12bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv12bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1h { z1.s }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x bfloat>, ptr %a
+ store <vscale x 12 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv13bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv13bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x bfloat>, ptr %a
+ store <vscale x 13 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv14bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv14bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1h { z1.s }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x bfloat>, ptr %a
+ store <vscale x 14 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv15bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv15bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x bfloat>, ptr %a
+ store <vscale x 15 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv16bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv16bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x bfloat>, ptr %a
+ store <vscale x 16 x bfloat> %c, ptr %b
+ ret void
+}
+
+define <vscale x 1 x i16> @sve_sextload_nxv1i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i8>, ptr %a
+ %c.sext = sext <vscale x 1 x i8> %c to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %c.sext
+}
+
+define <vscale x 2 x i16> @sve_sextload_nxv2i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv2i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i8>, ptr %a
+ %c.sext = sext <vscale x 2 x i8> %c to <vscale x 2 x i16>
+ ret <vscale x 2 x i16> %c.sext
+}
+
+define <vscale x 3 x i16> @sve_sextload_nxv3i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i8>, ptr %a
+ %c.sext = sext <vscale x 3 x i8> %c to <vscale x 3 x i16>
+ ret <vscale x 3 x i16> %c.sext
+}
+
+define <vscale x 4 x i16> @sve_sextload_nxv4i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i8>, ptr %a
+ %c.sext = sext <vscale x 4 x i8> %c to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %c.sext
+}
+
+define <vscale x 5 x i16> @sve_sextload_nxv5i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv5i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i8>, ptr %a
+ %c.sext = sext <vscale x 5 x i8> %c to <vscale x 5 x i16>
+ ret <vscale x 5 x i16> %c.sext
+}
+
+define <vscale x 6 x i16> @sve_sextload_nxv6i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv6i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x8, all, mul #3
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i8>, ptr %a
+ %c.sext = sext <vscale x 6 x i8> %c to <vscale x 6 x i16>
+ ret <vscale x 6 x i16> %c.sext
+}
+
+define <vscale x 7 x i16> @sve_sextload_nxv7i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv7i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i8>, ptr %a
+ %c.sext = sext <vscale x 7 x i8> %c to <vscale x 7 x i16>
+ ret <vscale x 7 x i16> %c.sext
+}
+
+define <vscale x 8 x i16> @sve_sextload_nxv8i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i8>, ptr %a
+ %c.sext = sext <vscale x 8 x i8> %c to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %c.sext
+}
+
+define <vscale x 9 x i16> @sve_sextload_nxv9i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x i8>, ptr %a
+ %c.sext = sext <vscale x 9 x i8> %c to <vscale x 9 x i16>
+ ret <vscale x 9 x i16> %c.sext
+}
+
+define <vscale x 10 x i16> @sve_sextload_nxv10i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv10i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #5
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [sp, #4, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x i8>, ptr %a
+ %c.sext = sext <vscale x 10 x i8> %c to <vscale x 10 x i16>
+ ret <vscale x 10 x i16> %c.sext
+}
+
+define <vscale x 11 x i16> @sve_sextload_nxv11i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv11i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x i8>, ptr %a
+ %c.sext = sext <vscale x 11 x i8> %c to <vscale x 11 x i16>
+ ret <vscale x 11 x i16> %c.sext
+}
+
+define <vscale x 12 x i16> @sve_sextload_nxv12i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv12i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntw x8, all, mul #3
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x i8>, ptr %a
+ %c.sext = sext <vscale x 12 x i8> %c to <vscale x 12 x i16>
+ ret <vscale x 12 x i16> %c.sext
+}
+
+define <vscale x 13 x i16> @sve_sextload_nxv13i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv13i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x i8>, ptr %a
+ %c.sext = sext <vscale x 13 x i8> %c to <vscale x 13 x i16>
+ ret <vscale x 13 x i16> %c.sext
+}
+
+define <vscale x 14 x i16> @sve_sextload_nxv14i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv14i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #7
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1sb { z2.h }, p0/z, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z2, [sp]
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: st1h { z1.d }, p0, [sp, #6, mul vl]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x i8>, ptr %a
+ %c.sext = sext <vscale x 14 x i8> %c to <vscale x 14 x i16>
+ ret <vscale x 14 x i16> %c.sext
+}
+
+define <vscale x 15 x i16> @sve_sextload_nxv15i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv15i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x i8>, ptr %a
+ %c.sext = sext <vscale x 15 x i8> %c to <vscale x 15 x i16>
+ ret <vscale x 15 x i16> %c.sext
+}
+
+define <vscale x 16 x i16> @sve_sextload_nxv16i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x i8>, ptr %a
+ %c.sext = sext <vscale x 16 x i8> %c to <vscale x 16 x i16>
+ ret <vscale x 16 x i16> %c.sext
+}
+
+define <vscale x 1 x i32> @sve_sextload_nxv1i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i16>, ptr %a
+ %c.sext = sext <vscale x 1 x i16> %c to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %c.sext
+}
+
+define <vscale x 2 x i32> @sve_sextload_nxv2i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i16>, ptr %a
+ %c.sext = sext <vscale x 2 x i16> %c to <vscale x 2 x i32>
+ ret <vscale x 2 x i32> %c.sext
+}
+
+define <vscale x 3 x i32> @sve_sextload_nxv3i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv3i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i16>, ptr %a
+ %c.sext = sext <vscale x 3 x i16> %c to <vscale x 3 x i32>
+ ret <vscale x 3 x i32> %c.sext
+}
+
+define <vscale x 4 x i32> @sve_sextload_nxv4i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i16>, ptr %a
+ %c.sext = sext <vscale x 4 x i16> %c to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %c.sext
+}
+
+define <vscale x 5 x i32> @sve_sextload_nxv5i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv5i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i16>, ptr %a
+ %c.sext = sext <vscale x 5 x i16> %c to <vscale x 5 x i32>
+ ret <vscale x 5 x i32> %c.sext
+}
+
+define <vscale x 6 x i32> @sve_sextload_nxv6i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv6i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #3
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: st1w { z0.d }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i16>, ptr %a
+ %c.sext = sext <vscale x 6 x i16> %c to <vscale x 6 x i32>
+ ret <vscale x 6 x i32> %c.sext
+}
+
+define <vscale x 7 x i32> @sve_sextload_nxv7i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv7i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i16>, ptr %a
+ %c.sext = sext <vscale x 7 x i16> %c to <vscale x 7 x i32>
+ ret <vscale x 7 x i32> %c.sext
+}
+
+define <vscale x 8 x i32> @sve_sextload_nxv8i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i16>, ptr %a
+ %c.sext = sext <vscale x 8 x i16> %c to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %c.sext
+}
+
+define <vscale x 1 x i64> @sve_sextload_nxv1i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i32>, ptr %a
+ %c.sext = sext <vscale x 1 x i32> %c to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %c.sext
+}
+
+define <vscale x 2 x i64> @sve_sextload_nxv2i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i32>, ptr %a
+ %c.sext = sext <vscale x 2 x i32> %c to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %c.sext
+}
+
+define <vscale x 3 x i64> @sve_sextload_nxv3i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sw { z0.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i32>, ptr %a
+ %c.sext = sext <vscale x 3 x i32> %c to <vscale x 3 x i64>
+ ret <vscale x 3 x i64> %c.sext
+}
+
+define <vscale x 4 x i64> @sve_sextload_nxv4i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i32>, ptr %a
+ %c.sext = sext <vscale x 4 x i32> %c to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %c.sext
+}
+
+define <vscale x 1 x i16> @sve_zextload_nxv1i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i8>, ptr %a
+ %c.zext = zext <vscale x 1 x i8> %c to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %c.zext
+}
+
+define <vscale x 2 x i16> @sve_zextload_nxv2i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv2i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i8>, ptr %a
+ %c.zext = zext <vscale x 2 x i8> %c to <vscale x 2 x i16>
+ ret <vscale x 2 x i16> %c.zext
+}
+
+define <vscale x 3 x i16> @sve_zextload_nxv3i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i8>, ptr %a
+ %c.zext = zext <vscale x 3 x i8> %c to <vscale x 3 x i16>
+ ret <vscale x 3 x i16> %c.zext
+}
+
+define <vscale x 4 x i16> @sve_zextload_nxv4i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i8>, ptr %a
+ %c.zext = zext <vscale x 4 x i8> %c to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %c.zext
+}
+
+define <vscale x 5 x i16> @sve_zextload_nxv5i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv5i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i8>, ptr %a
+ %c.zext = zext <vscale x 5 x i8> %c to <vscale x 5 x i16>
+ ret <vscale x 5 x i16> %c.zext
+}
+
+define <vscale x 6 x i16> @sve_zextload_nxv6i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv6i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x8, all, mul #3
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i8>, ptr %a
+ %c.zext = zext <vscale x 6 x i8> %c to <vscale x 6 x i16>
+ ret <vscale x 6 x i16> %c.zext
+}
+
+define <vscale x 7 x i16> @sve_zextload_nxv7i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv7i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i8>, ptr %a
+ %c.zext = zext <vscale x 7 x i8> %c to <vscale x 7 x i16>
+ ret <vscale x 7 x i16> %c.zext
+}
+
+define <vscale x 8 x i16> @sve_zextload_nxv8i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i8>, ptr %a
+ %c.zext = zext <vscale x 8 x i8> %c to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %c.zext
+}
+
+define <vscale x 9 x i16> @sve_zextload_nxv9i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x i8>, ptr %a
+ %c.zext = zext <vscale x 9 x i8> %c to <vscale x 9 x i16>
+ ret <vscale x 9 x i16> %c.zext
+}
+
+define <vscale x 10 x i16> @sve_zextload_nxv10i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv10i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #5
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [sp, #4, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x i8>, ptr %a
+ %c.zext = zext <vscale x 10 x i8> %c to <vscale x 10 x i16>
+ ret <vscale x 10 x i16> %c.zext
+}
+
+define <vscale x 11 x i16> @sve_zextload_nxv11i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv11i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x i8>, ptr %a
+ %c.zext = zext <vscale x 11 x i8> %c to <vscale x 11 x i16>
+ ret <vscale x 11 x i16> %c.zext
+}
+
+define <vscale x 12 x i16> @sve_zextload_nxv12i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv12i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntw x8, all, mul #3
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x i8>, ptr %a
+ %c.zext = zext <vscale x 12 x i8> %c to <vscale x 12 x i16>
+ ret <vscale x 12 x i16> %c.zext
+}
+
+define <vscale x 13 x i16> @sve_zextload_nxv13i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv13i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x i8>, ptr %a
+ %c.zext = zext <vscale x 13 x i8> %c to <vscale x 13 x i16>
+ ret <vscale x 13 x i16> %c.zext
+}
+
+define <vscale x 14 x i16> @sve_zextload_nxv14i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv14i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #7
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z2.h }, p0/z, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z2, [sp]
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: st1h { z1.d }, p0, [sp, #6, mul vl]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x i8>, ptr %a
+ %c.zext = zext <vscale x 14 x i8> %c to <vscale x 14 x i16>
+ ret <vscale x 14 x i16> %c.zext
+}
+
+define <vscale x 15 x i16> @sve_zextload_nxv15i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv15i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x i8>, ptr %a
+ %c.zext = zext <vscale x 15 x i8> %c to <vscale x 15 x i16>
+ ret <vscale x 15 x i16> %c.zext
+}
+
+define <vscale x 16 x i16> @sve_zextload_nxv16i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x i8>, ptr %a
+ %c.zext = zext <vscale x 16 x i8> %c to <vscale x 16 x i16>
+ ret <vscale x 16 x i16> %c.zext
+}
+
+define <vscale x 1 x i32> @sve_zextload_nxv1i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i16>, ptr %a
+ %c.zext = zext <vscale x 1 x i16> %c to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %c.zext
+}
+
+define <vscale x 2 x i32> @sve_zextload_nxv2i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i16>, ptr %a
+ %c.zext = zext <vscale x 2 x i16> %c to <vscale x 2 x i32>
+ ret <vscale x 2 x i32> %c.zext
+}
+
+define <vscale x 3 x i32> @sve_zextload_nxv3i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv3i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i16>, ptr %a
+ %c.zext = zext <vscale x 3 x i16> %c to <vscale x 3 x i32>
+ ret <vscale x 3 x i32> %c.zext
+}
+
+define <vscale x 4 x i32> @sve_zextload_nxv4i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i16>, ptr %a
+ %c.zext = zext <vscale x 4 x i16> %c to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %c.zext
+}
+
+define <vscale x 5 x i32> @sve_zextload_nxv5i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv5i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i16>, ptr %a
+ %c.zext = zext <vscale x 5 x i16> %c to <vscale x 5 x i32>
+ ret <vscale x 5 x i32> %c.zext
+}
+
+define <vscale x 6 x i32> @sve_zextload_nxv6i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv6i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #3
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: st1w { z0.d }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i16>, ptr %a
+ %c.zext = zext <vscale x 6 x i16> %c to <vscale x 6 x i32>
+ ret <vscale x 6 x i32> %c.zext
+}
+
+define <vscale x 7 x i32> @sve_zextload_nxv7i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv7i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i16>, ptr %a
+ %c.zext = zext <vscale x 7 x i16> %c to <vscale x 7 x i32>
+ ret <vscale x 7 x i32> %c.zext
+}
+
+define <vscale x 8 x i32> @sve_zextload_nxv8i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i16>, ptr %a
+ %c.zext = zext <vscale x 8 x i16> %c to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %c.zext
+}
+
+define <vscale x 1 x i64> @sve_zextload_nxv1i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i32>, ptr %a
+ %c.zext = zext <vscale x 1 x i32> %c to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %c.zext
+}
+
+define <vscale x 2 x i64> @sve_zextload_nxv2i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i32>, ptr %a
+ %c.zext = zext <vscale x 2 x i32> %c to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %c.zext
+}
+
+define <vscale x 3 x i64> @sve_zextload_nxv3i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i32>, ptr %a
+ %c.zext = zext <vscale x 3 x i32> %c to <vscale x 3 x i64>
+ ret <vscale x 3 x i64> %c.zext
+}
+
+define <vscale x 4 x i64> @sve_zextload_nxv4i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i32>, ptr %a
+ %c.zext = zext <vscale x 4 x i32> %c to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %c.zext
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
index 2cbb29e..d8de12c 100644
--- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
+++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
@@ -672,5 +672,3 @@ entry:
ret i32 %x
}
declare void @other()
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-FRAMELAYOUT: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
index 4a04934..6946cc2 100644
--- a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64 -O3 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -O3 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
declare void @t()
@@ -581,3 +581,323 @@ end:
ret void
}
+define ptr @tbnz_wzr(i1 %cmp1.not.i, ptr %locflg) {
+; CHECK-SD-LABEL: tbnz_wzr:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tbz w0, #0, .LBB20_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: tbnz wzr, #0, .LBB20_3
+; CHECK-SD-NEXT: b .LBB20_4
+; CHECK-SD-NEXT: .LBB20_2: // %opnfil.exit.thread
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: tbz w8, #0, .LBB20_4
+; CHECK-SD-NEXT: .LBB20_3: // %if.else25
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: .LBB20_4: // %common.ret
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: tbnz_wzr:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #0 // =0x0
+; CHECK-GI-NEXT: tbz w0, #0, .LBB20_3
+; CHECK-GI-NEXT: // %bb.1: // %if.end10
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB20_4
+; CHECK-GI-NEXT: .LBB20_2: // %common.ret
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB20_3: // %opnfil.exit.thread
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: tbz w8, #0, .LBB20_2
+; CHECK-GI-NEXT: .LBB20_4: // %if.else25
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+entry:
+ br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread
+
+opnfil.exit.thread: ; preds = %entry
+ store i32 0, ptr %locflg, align 4
+ br label %if.end10
+
+if.end10: ; preds = %opnfil.exit.thread, %entry
+ %cmp5 = phi i1 [ true, %opnfil.exit.thread ], [ false, %entry ]
+ br i1 %cmp5, label %if.else25, label %if.then12
+
+if.then12: ; preds = %if.end10
+ %call20 = load i32, ptr null, align 4
+ br label %if.end26
+
+if.else25: ; preds = %if.end10
+ store i32 0, ptr %locflg, align 4
+ br label %if.end26
+
+if.end26: ; preds = %if.else25, %if.then12
+ br i1 %cmp5, label %common.ret, label %if.then28
+
+common.ret: ; preds = %if.then28, %if.end26
+ %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ]
+ ret ptr %common.ret.op
+
+if.then28: ; preds = %if.end26
+ %0 = load ptr, ptr null, align 8
+ br label %common.ret
+}
+
+define ptr @tbz_wzr(i1 %cmp1.not.i, ptr %locflg) {
+; CHECK-SD-LABEL: tbz_wzr:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tbz w0, #0, .LBB21_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: tbnz w8, #0, .LBB21_3
+; CHECK-SD-NEXT: b .LBB21_4
+; CHECK-SD-NEXT: .LBB21_2: // %opnfil.exit.thread
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: tbz wzr, #0, .LBB21_4
+; CHECK-SD-NEXT: .LBB21_3: // %if.else25
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: .LBB21_4: // %common.ret
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: tbz_wzr:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: tbz w0, #0, .LBB21_3
+; CHECK-GI-NEXT: // %bb.1: // %if.end10
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB21_4
+; CHECK-GI-NEXT: .LBB21_2: // %common.ret
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB21_3: // %opnfil.exit.thread
+; CHECK-GI-NEXT: mov w8, #0 // =0x0
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: tbz w8, #0, .LBB21_2
+; CHECK-GI-NEXT: .LBB21_4: // %if.else25
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+entry:
+ br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread
+
+opnfil.exit.thread: ; preds = %entry
+ store i32 0, ptr %locflg, align 4
+ br label %if.end10
+
+if.end10: ; preds = %opnfil.exit.thread, %entry
+ %cmp5 = phi i1 [ false, %opnfil.exit.thread ], [ true, %entry ]
+ br i1 %cmp5, label %if.else25, label %if.then12
+
+if.then12: ; preds = %if.end10
+ %call20 = load i32, ptr null, align 4
+ br label %if.end26
+
+if.else25: ; preds = %if.end10
+ store i32 0, ptr %locflg, align 4
+ br label %if.end26
+
+if.end26: ; preds = %if.else25, %if.then12
+ br i1 %cmp5, label %common.ret, label %if.then28
+
+common.ret: ; preds = %if.then28, %if.end26
+ %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ]
+ ret ptr %common.ret.op
+
+if.then28: ; preds = %if.end26
+ %0 = load ptr, ptr null, align 8
+ br label %common.ret
+}
+
+define ptr @cbnz_wzr(i1 %cmp1.not.i, ptr %locflg) {
+; CHECK-SD-LABEL: cbnz_wzr:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tbz w0, #0, .LBB22_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: cbnz wzr, .LBB22_3
+; CHECK-SD-NEXT: b .LBB22_4
+; CHECK-SD-NEXT: .LBB22_2: // %opnfil.exit.thread
+; CHECK-SD-NEXT: mov w8, #10 // =0xa
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: cbz w8, .LBB22_4
+; CHECK-SD-NEXT: .LBB22_3: // %if.else25
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: .LBB22_4: // %common.ret
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cbnz_wzr:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: tbnz w0, #0, .LBB22_2
+; CHECK-GI-NEXT: // %bb.1: // %opnfil.exit.thread
+; CHECK-GI-NEXT: mov w8, #10 // =0xa
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: .LBB22_2: // %if.end10
+; CHECK-GI-NEXT: cbz w8, .LBB22_4
+; CHECK-GI-NEXT: // %bb.3: // %if.else25
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: .LBB22_4: // %common.ret
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+entry:
+ br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread
+
+opnfil.exit.thread: ; preds = %entry
+ store i32 0, ptr %locflg, align 4
+ br label %if.end10
+
+if.end10: ; preds = %opnfil.exit.thread, %entry
+ %cmp5 = phi i32 [ 10, %opnfil.exit.thread ], [ 0, %entry ]
+ %cmp5b = icmp ne i32 %cmp5, 0
+ br i1 %cmp5b, label %if.else25, label %if.then12
+
+if.then12: ; preds = %if.end10
+ %call20 = load i32, ptr null, align 4
+ br label %if.end26
+
+if.else25: ; preds = %if.end10
+ store i32 0, ptr %locflg, align 4
+ br label %if.end26
+
+if.end26: ; preds = %if.else25, %if.then12
+ br i1 %cmp5b, label %common.ret, label %if.then28
+
+common.ret: ; preds = %if.then28, %if.end26
+ %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ]
+ ret ptr %common.ret.op
+
+if.then28: ; preds = %if.end26
+ %0 = load ptr, ptr null, align 8
+ br label %common.ret
+}
+
+define ptr @cbz_wzr(i1 %cmp1.not.i, ptr %locflg) {
+; CHECK-SD-LABEL: cbz_wzr:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tbz w0, #0, .LBB23_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: mov w8, #10 // =0xa
+; CHECK-SD-NEXT: cbnz w8, .LBB23_3
+; CHECK-SD-NEXT: b .LBB23_4
+; CHECK-SD-NEXT: .LBB23_2: // %opnfil.exit.thread
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: cbz wzr, .LBB23_4
+; CHECK-SD-NEXT: .LBB23_3: // %if.else25
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: .LBB23_4: // %common.ret
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cbz_wzr:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #10 // =0xa
+; CHECK-GI-NEXT: tbnz w0, #0, .LBB23_2
+; CHECK-GI-NEXT: // %bb.1: // %opnfil.exit.thread
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: .LBB23_2: // %if.end10
+; CHECK-GI-NEXT: cbz w8, .LBB23_4
+; CHECK-GI-NEXT: // %bb.3: // %if.else25
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: .LBB23_4: // %common.ret
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+entry:
+ br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread
+
+opnfil.exit.thread: ; preds = %entry
+ store i32 0, ptr %locflg, align 4
+ br label %if.end10
+
+if.end10: ; preds = %opnfil.exit.thread, %entry
+ %cmp5 = phi i32 [ 0, %opnfil.exit.thread ], [ 10, %entry ]
+ %cmp5b = icmp ne i32 %cmp5, 0
+ br i1 %cmp5b, label %if.else25, label %if.then12
+
+if.then12: ; preds = %if.end10
+ %call20 = load i32, ptr null, align 4
+ br label %if.end26
+
+if.else25: ; preds = %if.end10
+ store i32 0, ptr %locflg, align 4
+ br label %if.end26
+
+if.end26: ; preds = %if.else25, %if.then12
+ br i1 %cmp5b, label %common.ret, label %if.then28
+
+common.ret: ; preds = %if.then28, %if.end26
+ %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ]
+ ret ptr %common.ret.op
+
+if.then28: ; preds = %if.end26
+ %0 = load ptr, ptr null, align 8
+ br label %common.ret
+}
+
+define i1 @avifSequenceHeaderParse() {
+; CHECK-SD-LABEL: avifSequenceHeaderParse:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: cbz w8, .LBB24_2
+; CHECK-SD-NEXT: .LBB24_1: // %bb6
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB24_2: // %bb1
+; CHECK-SD-NEXT: cbz w8, .LBB24_4
+; CHECK-SD-NEXT: // %bb.3:
+; CHECK-SD-NEXT: tbz xzr, #63, .LBB24_1
+; CHECK-SD-NEXT: b .LBB24_5
+; CHECK-SD-NEXT: .LBB24_4: // %bb2
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: tbz x8, #63, .LBB24_1
+; CHECK-SD-NEXT: .LBB24_5: // %bb4
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: avifSequenceHeaderParse:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: ret
+entry:
+ %a = icmp slt i64 0, 0
+ br i1 %a, label %bb1, label %bb6
+
+bb1: ; preds = %entry
+ %b = icmp eq i32 1, 0
+ br i1 %b, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %c = load i8, ptr null, align 1
+ %d = zext i8 1 to i64
+ %e = shl i64 %d, 0
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1
+ %f = phi i64 [ %e, %bb2 ], [ 0, %bb1 ]
+ %g = icmp slt i64 %f, 0
+ br i1 %g, label %bb4, label %bb6
+
+bb4: ; preds = %bb3
+ %h = icmp eq i32 1, 0
+ br i1 %h, label %bb5, label %bb7
+
+bb5: ; preds = %bb4
+ %i = load i8, ptr null, align 1
+ %j = shl i64 0, 0
+ br label %bb7
+
+bb6: ; preds = %bb7, %bb3, %entry
+ %k = phi i1 [ false, %bb7 ], [ false, %bb3 ], [ false, %entry ]
+ ret i1 %k
+
+bb7: ; preds = %bb5, %bb4
+ %l = phi ptr [ inttoptr (i64 1 to ptr), %bb5 ], [ null, %bb4 ]
+ %m = phi i64 [ %j, %bb5 ], [ 0, %bb4 ]
+ %n = icmp ult ptr %l, null
+ br label %bb6
+}
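The four `*_wzr` tests and the `avifSequenceHeaderParse` reduction above guard branch lowering when a condition materializes as `wzr`/`xzr`, AArch64's always-zero registers: `tbz wzr, #0` and `cbz wzr` always branch, while `tbnz wzr, #0` and `cbnz wzr` never do, so any rewrite of these instructions must respect the statically known outcome. The RUN lines also gain `-O3` so the cases are exercised at full codegen optimization. A minimal sketch of how such a branch arises, under hypothetical names:

```llvm
; Hypothetical illustration (not from the patch): a branch on a phi of
; constants; the false input can be materialized as wzr, yielding a
; tbz/cbz whose outcome is statically known.
define void @example_branch_on_zero(i1 %c, ptr %p) {
entry:
  br i1 %c, label %left, label %right
left:
  br label %join
right:
  store i32 0, ptr %p
  br label %join
join:
  %k = phi i1 [ false, %left ], [ true, %right ]
  br i1 %k, label %then, label %done
then:
  store i32 1, ptr %p
  br label %done
done:
  ret void
}
```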
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
index 666523c..ff618c0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
@@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
-; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
+; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen
; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen
; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB14_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB14_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
index 3515028..007417c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
@@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
-; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
+; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen
; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen
; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB14_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB14_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
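The `atomicrmw_fmax.ll` and `atomicrmw_fmin.ll` churn above appears to be register reshuffling only: the incoming operand is now canonicalized straight out of `v[0:1]` into `v[6:7]` instead of being copied first, dropping a pair of `v_mov` copies ahead of the compare-and-swap loop, and the loaded value stays in `v[4:5]` across iterations; the computed result is unchanged. A reduced sketch of the pattern under test, with assumed names and metadata:

```llvm
; Hypothetical reduced form (names and metadata assumed, not copied from
; the patch): an agent-scope fmax RMW on a buffer fat pointer that
; returns the previous value, expanded into a cmpxchg loop on targets
; without a native f64 buffer atomic.
define double @example_fmax_rmw(ptr addrspace(7) %ptr, double %val) {
  %old = atomicrmw fmax ptr addrspace(7) %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
  ret double %old
}

!0 = !{}
```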
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
index ba5a8e9..9e412b6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
@@ -209,48 +209,48 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v2
; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s10, v3, v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v6, s9
-; GFX8-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s8, v0
; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s11, v4, v[1:2]
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s8, v0
-; GFX8-NEXT: v_subb_u32_e64 v6, s[0:1], v6, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s9
+; GFX8-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NEXT: v_subb_u32_e64 v7, s[0:1], v2, v1, vcc
; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s9, v1
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v6
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v7
; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2
-; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v6
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v6
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1]
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v7
; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[0:1]
-; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s10, v2
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1]
+; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s10, v6
; GFX8-NEXT: v_subbrev_u32_e64 v8, s[0:1], 0, v0, vcc
; GFX8-NEXT: v_add_u32_e64 v9, s[0:1], 1, v4
; GFX8-NEXT: v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1]
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v8
; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v7
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2
; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1]
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v8
-; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v7
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v2
; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[0:1]
; GFX8-NEXT: v_add_u32_e64 v12, s[0:1], 1, v9
; GFX8-NEXT: v_subbrev_u32_e32 v14, vcc, 0, v0, vcc
; GFX8-NEXT: v_addc_u32_e64 v13, s[0:1], 0, v10, s[0:1]
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
; GFX8-NEXT: v_cndmask_b32_e32 v0, v9, v12, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, v0, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v14, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v4, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1]
; GFX8-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v14, vcc
; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[0:1]
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
@@ -299,7 +299,6 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v1, vcc
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s2, v3, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, s19
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v4, v[1:2]
; GFX9-NEXT: v_mul_hi_u32 v6, v3, v0
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s3, v3, v[1:2]
@@ -346,30 +345,30 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
; GFX9-NEXT: v_add3_u32 v3, v3, v2, v6
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s18, v3, v[1:2]
-; GFX9-NEXT: v_mov_b32_e32 v6, s17
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_sub_co_u32_e32 v7, vcc, s16, v0
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s19, v5, v[1:2]
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s16, v0
-; GFX9-NEXT: v_subb_co_u32_e64 v6, s[0:1], v6, v1, vcc
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v6
+; GFX9-NEXT: v_mov_b32_e32 v2, s17
+; GFX9-NEXT: v_mov_b32_e32 v4, s19
+; GFX9-NEXT: v_subb_co_u32_e64 v8, s[0:1], v2, v1, vcc
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v8
; GFX9-NEXT: v_sub_u32_e32 v0, s17, v1
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v6
-; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[0:1]
-; GFX9-NEXT: v_subrev_co_u32_e32 v8, vcc, s18, v2
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1]
+; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s18, v7
; GFX9-NEXT: v_subbrev_co_u32_e64 v9, s[0:1], 0, v0, vcc
; GFX9-NEXT: v_add_co_u32_e64 v10, s[0:1], 1, v5
; GFX9-NEXT: v_addc_co_u32_e64 v11, s[0:1], 0, v3, s[0:1]
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v9
; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v9
-; GFX9-NEXT: v_subrev_co_u32_e32 v7, vcc, s18, v8
+; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s18, v2
; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[0:1]
; GFX9-NEXT: v_add_co_u32_e64 v13, s[0:1], 1, v10
; GFX9-NEXT: v_subbrev_co_u32_e32 v15, vcc, 0, v0, vcc
@@ -378,14 +377,15 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v13, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v14, vcc
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, v5, v0, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v10, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v15, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v5, s[0:1]
-; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[12:13]
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[14:15]
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v15, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v3, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v6, v[0:1], s[12:13]
+; GFX9-NEXT: global_store_dwordx2 v6, v[2:3], s[14:15]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: udivrem_i64:
@@ -1070,6 +1070,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX8-NEXT: v_mul_lo_u32 v3, s8, v1
; GFX8-NEXT: v_mul_hi_u32 v4, s8, v0
; GFX8-NEXT: v_mul_hi_u32 v0, s9, v0
+; GFX8-NEXT: v_mov_b32_e32 v5, s13
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
@@ -1082,184 +1083,183 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v0, v2
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v0, v2
; GFX8-NEXT: v_mul_hi_u32 v4, s9, v1
-; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v6, 0
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v7, 0
; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v4, v2
-; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v7, v[1:2]
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v2
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v8, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s13, v7, v[1:2]
; GFX8-NEXT: v_mov_b32_e32 v3, s9
-; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s8, v0
-; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s13, v6, v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v4, s13
-; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v1, vcc
-; GFX8-NEXT: v_sub_u32_e64 v1, s[0:1], s9, v1
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, s8, v0
+; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v2, vcc
+; GFX8-NEXT: v_sub_u32_e64 v2, s[0:1], s9, v2
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v8
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1]
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v1
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1]
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v9, v2, v3, s[0:1]
-; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s15
-; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s14
-; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v1, v4, vcc
-; GFX8-NEXT: v_mul_f32_e32 v1, 0x4f800000, v2
-; GFX8-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_rcp_iflag_f32_e32 v1, v1
-; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v8
-; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v5, vcc
-; GFX8-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1
-; GFX8-NEXT: v_mul_f32_e32 v2, 0x2f800000, v1
-; GFX8-NEXT: v_trunc_f32_e32 v3, v2
-; GFX8-NEXT: v_mul_f32_e32 v2, 0xcf800000, v3
-; GFX8-NEXT: v_add_f32_e32 v1, v2, v1
-; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v1
-; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v6
-; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v7, s[0:1]
-; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v12, 0
-; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v3
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v3, v4, s[0:1]
+; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s15
+; GFX8-NEXT: v_cvt_f32_u32_e32 v4, s14
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v2, v5, vcc
+; GFX8-NEXT: v_mul_f32_e32 v2, 0x4f800000, v3
+; GFX8-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX8-NEXT: v_rcp_iflag_f32_e32 v2, v2
+; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v1
+; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v6, vcc
+; GFX8-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
+; GFX8-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
+; GFX8-NEXT: v_trunc_f32_e32 v4, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 0xcf800000, v4
+; GFX8-NEXT: v_add_f32_e32 v2, v3, v2
+; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v2
+; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v7
+; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v8, s[0:1]
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v12, 0
+; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v4
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v11
; GFX8-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1]
-; GFX8-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3]
+; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v6, v5, vcc
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4]
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v10
; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1]
-; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s3, v12, v[2:3]
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s3, v12, v[3:4]
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v11
; GFX8-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1]
-; GFX8-NEXT: v_mul_lo_u32 v3, v15, v1
-; GFX8-NEXT: v_mul_lo_u32 v17, v12, v2
-; GFX8-NEXT: v_mul_hi_u32 v5, v12, v1
-; GFX8-NEXT: v_mul_hi_u32 v1, v15, v1
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v17
+; GFX8-NEXT: v_mul_lo_u32 v4, v15, v2
+; GFX8-NEXT: v_mul_lo_u32 v17, v12, v3
+; GFX8-NEXT: v_mul_hi_u32 v6, v12, v2
+; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v17
; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
-; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GFX8-NEXT: v_mul_lo_u32 v5, v15, v2
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v17, v3
-; GFX8-NEXT: v_mul_hi_u32 v17, v12, v2
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v5, v1
-; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v17
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v6
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8-NEXT: v_mul_lo_u32 v6, v15, v3
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v17, v4
+; GFX8-NEXT: v_mul_hi_u32 v17, v12, v3
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v6, v2
+; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v17
; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v17
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v17
; GFX8-NEXT: v_add_u32_e32 v17, vcc, 1, v13
; GFX8-NEXT: v_addc_u32_e32 v18, vcc, 0, v14, vcc
; GFX8-NEXT: v_subrev_u32_e32 v19, vcc, s12, v10
-; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2
-; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v4, vcc
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
-; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
-; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v1
-; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v12, 0
-; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v2, vcc
+; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3
+; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4
+; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v2
+; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0
+; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v3, vcc
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v13, v17, vcc
-; GFX8-NEXT: v_mov_b32_e32 v1, v4
-; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v15, v[1:2]
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, v5
+; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3]
; GFX8-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9
-; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s3, v12, v[4:5]
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v6, v2, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v19, vcc
-; GFX8-NEXT: v_mul_lo_u32 v7, v15, v3
-; GFX8-NEXT: v_mul_lo_u32 v9, v12, v4
-; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[0:1]
-; GFX8-NEXT: v_mul_hi_u32 v8, v12, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v11, v20, vcc
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9
-; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8
-; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX8-NEXT: v_mul_lo_u32 v8, v15, v4
-; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v9, v7
+; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1]
+; GFX8-NEXT: v_mul_lo_u32 v7, v15, v4
+; GFX8-NEXT: v_mul_lo_u32 v8, v12, v5
; GFX8-NEXT: v_mul_hi_u32 v9, v12, v4
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v8, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8
; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v9
-; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v9
-; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX8-NEXT: v_mul_lo_u32 v9, v15, v5
+; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7
+; GFX8-NEXT: v_mul_hi_u32 v8, v12, v5
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v9, v4
+; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8
+; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v9, v8
+; GFX8-NEXT: v_mul_hi_u32 v5, v15, v5
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v7
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v12, v3
-; GFX8-NEXT: v_addc_u32_e32 v4, vcc, v15, v4, vcc
-; GFX8-NEXT: v_mul_lo_u32 v7, s11, v3
-; GFX8-NEXT: v_mul_lo_u32 v8, s10, v4
-; GFX8-NEXT: v_cndmask_b32_e64 v6, v0, v6, s[0:1]
-; GFX8-NEXT: v_mul_hi_u32 v0, s10, v3
-; GFX8-NEXT: v_mul_hi_u32 v3, s11, v3
+; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v7
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v12, v4
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v15, v5, vcc
+; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4
+; GFX8-NEXT: v_mul_lo_u32 v8, s10, v5
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1]
+; GFX8-NEXT: v_mul_hi_u32 v1, s10, v4
+; GFX8-NEXT: v_mul_hi_u32 v4, s11, v4
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8
; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, v8, v0
-; GFX8-NEXT: v_mul_hi_u32 v8, s10, v4
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v7, v3
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_mul_lo_u32 v7, s11, v5
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v8, v1
+; GFX8-NEXT: v_mul_hi_u32 v8, s10, v5
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v7, v4
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v8
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8
; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8
-; GFX8-NEXT: v_add_u32_e32 v9, vcc, v3, v0
-; GFX8-NEXT: v_mul_hi_u32 v8, s11, v4
-; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s14, v9, 0
-; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v8, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
-; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s14, v10, v[0:1]
-; GFX8-NEXT: v_mov_b32_e32 v4, s11
-; GFX8-NEXT: v_mov_b32_e32 v0, s15
-; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s15, v9, v[7:8]
-; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v3
-; GFX8-NEXT: v_subb_u32_e64 v11, s[0:1], v4, v7, vcc
-; GFX8-NEXT: v_sub_u32_e64 v3, s[0:1], s11, v7
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v11
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, v4, v1
+; GFX8-NEXT: v_mul_hi_u32 v8, s11, v5
+; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s14, v11, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1
+; GFX8-NEXT: v_add_u32_e32 v12, vcc, v8, v1
+; GFX8-NEXT: v_mov_b32_e32 v1, v5
+; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s14, v12, v[1:2]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s15
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s15, v11, v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v1, s11
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v4
+; GFX8-NEXT: v_subb_u32_e64 v1, s[0:1], v1, v0, vcc
+; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s11, v0
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v1
; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1]
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v8
-; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v11
-; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v3, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v7, s[0:1]
-; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s14, v8
-; GFX8-NEXT: v_subbrev_u32_e64 v12, s[0:1], 0, v3, vcc
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v12
+; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1]
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v1
+; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1]
+; GFX8-NEXT: v_subrev_u32_e32 v9, vcc, s14, v8
+; GFX8-NEXT: v_subbrev_u32_e64 v10, s[0:1], 0, v0, vcc
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v10
; GFX8-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v7
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v9
; GFX8-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v12
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v10
; GFX8-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1]
-; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v9
-; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v3, v0, vcc
-; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v10, s[0:1]
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v14
+; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v11
+; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
+; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v12, s[0:1]
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 1, v14
; GFX8-NEXT: v_addc_u32_e32 v16, vcc, 0, v15, vcc
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
-; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v7
+; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v9
; GFX8-NEXT: v_subbrev_u32_e64 v0, s[0:1], 0, v0, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v14, v3, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v9, v3, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v10, v14, s[0:1]
-; GFX8-NEXT: v_mov_b32_e32 v10, s5
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v13, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc
-; GFX8-NEXT: v_mov_b32_e32 v9, s4
-; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v8, v11, v0, s[0:1]
-; GFX8-NEXT: flat_store_dwordx4 v[9:10], v[1:4]
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v9, v13, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[5:8]
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[6:9]
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: udivrem_v2i64:
@@ -1355,11 +1355,11 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
; GFX9-NEXT: v_add3_u32 v8, v3, v2, v5
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s4, v8, v[1:2]
-; GFX9-NEXT: v_mov_b32_e32 v4, s17
; GFX9-NEXT: v_mov_b32_e32 v5, s5
; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s5, v7, v[1:2]
+; GFX9-NEXT: v_mov_b32_e32 v3, s17
; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s16, v0
-; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v4, v2, vcc
+; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v3, v2, vcc
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v0
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1]
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v1
@@ -1387,7 +1387,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX9-NEXT: v_cvt_u32_f32_e32 v15, v4
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v11
; GFX9-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1]
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v6, v5, vcc
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4]
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v10
; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1]
@@ -1396,128 +1396,128 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX9-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1]
; GFX9-NEXT: v_mul_lo_u32 v4, v15, v2
; GFX9-NEXT: v_mul_lo_u32 v17, v12, v3
-; GFX9-NEXT: v_mul_hi_u32 v6, v12, v2
+; GFX9-NEXT: v_mul_hi_u32 v5, v12, v2
; GFX9-NEXT: v_mul_hi_u32 v2, v15, v2
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v17
; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v6
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v5
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; GFX9-NEXT: v_mul_lo_u32 v6, v15, v3
+; GFX9-NEXT: v_mul_lo_u32 v5, v15, v3
; GFX9-NEXT: v_add_u32_e32 v4, v17, v4
; GFX9-NEXT: v_mul_hi_u32 v17, v12, v3
; GFX9-NEXT: v_mul_hi_u32 v3, v15, v3
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v5, v2
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v17
; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc
-; GFX9-NEXT: v_add_u32_e32 v6, v6, v17
+; GFX9-NEXT: v_add_u32_e32 v5, v5, v17
; GFX9-NEXT: v_add_co_u32_e32 v17, vcc, 1, v13
; GFX9-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v14, vcc
-; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10
-; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v5, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, v12, v2
-; GFX9-NEXT: v_add3_u32 v3, v6, v4, v3
+; GFX9-NEXT: v_add3_u32 v3, v5, v4, v3
; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, v15, v3, vcc
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc
; GFX9-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3]
+; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10
+; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v6, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s3, v12, v[2:3]
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9
-; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6]
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1]
-; GFX9-NEXT: v_mul_lo_u32 v7, v15, v4
-; GFX9-NEXT: v_mul_lo_u32 v8, v12, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc
-; GFX9-NEXT: v_mul_hi_u32 v10, v12, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v20, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v10
-; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX9-NEXT: v_mul_lo_u32 v10, v15, v5
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1]
+; GFX9-NEXT: v_mul_lo_u32 v6, v15, v4
+; GFX9-NEXT: v_mul_lo_u32 v7, v12, v5
+; GFX9-NEXT: v_mul_hi_u32 v9, v12, v4
; GFX9-NEXT: v_mul_hi_u32 v4, v15, v4
-; GFX9-NEXT: v_add_u32_e32 v7, v8, v7
-; GFX9-NEXT: v_mul_hi_u32 v8, v12, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v18, vcc
+; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3]
+; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v9
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3]
+; GFX9-NEXT: v_mul_lo_u32 v9, v15, v5
+; GFX9-NEXT: v_add_u32_e32 v6, v7, v6
+; GFX9-NEXT: v_mul_hi_u32 v7, v12, v5
; GFX9-NEXT: v_mul_hi_u32 v5, v15, v5
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7
-; GFX9-NEXT: v_add_u32_e32 v8, v10, v8
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v9, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[2:3]
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3]
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v6
+; GFX9-NEXT: v_add_u32_e32 v7, v9, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3]
+; GFX9-NEXT: v_add3_u32 v5, v7, v6, v5
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v12, v4
+; GFX9-NEXT: v_addc_co_u32_e64 v5, s[2:3], v15, v5, s[2:3]
+; GFX9-NEXT: v_mul_lo_u32 v6, s19, v4
+; GFX9-NEXT: v_mul_lo_u32 v7, s18, v5
+; GFX9-NEXT: v_mul_hi_u32 v9, s18, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v14, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v19, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v7
; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX9-NEXT: v_add3_u32 v5, v8, v7, v5
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc
-; GFX9-NEXT: v_mul_lo_u32 v7, s19, v4
-; GFX9-NEXT: v_mul_lo_u32 v8, s18, v5
-; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1]
-; GFX9-NEXT: v_mul_hi_u32 v1, s18, v4
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v9
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT: v_mul_lo_u32 v9, s19, v5
; GFX9-NEXT: v_mul_hi_u32 v4, s19, v4
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v7, v1
-; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-NEXT: v_mul_lo_u32 v7, s19, v5
-; GFX9-NEXT: v_add_u32_e32 v1, v8, v1
-; GFX9-NEXT: v_mul_hi_u32 v8, s18, v5
-; GFX9-NEXT: v_mul_hi_u32 v12, s19, v5
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v7, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v1
+; GFX9-NEXT: v_add_u32_e32 v6, v7, v6
+; GFX9-NEXT: v_mul_hi_u32 v7, s18, v5
+; GFX9-NEXT: v_mul_hi_u32 v13, s19, v5
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v9, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v6
; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s6, v11, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v9, s[0:1]
-; GFX9-NEXT: v_add_u32_e32 v0, v10, v8
-; GFX9-NEXT: v_add3_u32 v8, v0, v1, v12
-; GFX9-NEXT: v_mov_b32_e32 v0, v5
-; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s6, v8, v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v9, s19
+; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v8, s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, v9, v7
+; GFX9-NEXT: v_add3_u32 v12, v1, v12, v13
+; GFX9-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s6, v12, v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1]
; GFX9-NEXT: v_mov_b32_e32 v5, s7
-; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[0:1]
-; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s18, v4
-; GFX9-NEXT: v_subb_co_u32_e64 v9, s[0:1], v9, v0, vcc
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v9
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[8:9]
+; GFX9-NEXT: v_mov_b32_e32 v1, s19
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, s18, v4
+; GFX9-NEXT: v_subb_co_u32_e64 v1, s[0:1], v1, v0, vcc
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v1
; GFX9-NEXT: v_sub_u32_e32 v0, s19, v0
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v1
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v9
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v8
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v1
; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1]
-; GFX9-NEXT: v_subrev_co_u32_e32 v10, vcc, s6, v1
-; GFX9-NEXT: v_subbrev_co_u32_e64 v12, s[0:1], 0, v0, vcc
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v12
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1]
+; GFX9-NEXT: v_subrev_co_u32_e32 v9, vcc, s6, v8
+; GFX9-NEXT: v_subbrev_co_u32_e64 v10, s[0:1], 0, v0, vcc
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v10
; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v10
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v9
; GFX9-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v12
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v10
; GFX9-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1]
; GFX9-NEXT: v_add_co_u32_e64 v14, s[0:1], 1, v11
; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc
-; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v8, s[0:1]
+; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v12, s[0:1]
; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, 1, v14
; GFX9-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v15, vcc
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
; GFX9-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc
-; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v10
+; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v9
; GFX9-NEXT: v_subbrev_co_u32_e64 v0, s[0:1], 0, v0, s[0:1]
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
; GFX9-NEXT: v_mov_b32_e32 v13, 0
; GFX9-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v14, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v1, v8, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1]
; GFX9-NEXT: global_store_dwordx4 v13, v[2:5], s[12:13]
; GFX9-NEXT: global_store_dwordx4 v13, v[6:9], s[14:15]
; GFX9-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index d053425..7cc5051 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -1483,7 +1483,6 @@ define void @flat_atomic_xchg_i64_noret_av(ptr %ptr) #0 {
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX90A-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_cbranch_execz .LBB20_2
; GFX90A-NEXT: .LBB20_4: ; %atomicrmw.private
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index 815b9f2..df9c97f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -161654,177 +161654,175 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_clause 0x1f
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:244
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:240
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:236
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:232
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:228
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:224
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:220
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:216
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:212
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:208
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:204
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:200
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:196
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:192
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:188
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:184
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:180
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:176
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:172
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:168
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:164
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:160
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:156
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:152
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:148
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:144
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:140
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:136
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:132
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:128
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:124
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:120
-; GFX11-TRUE16-NEXT: s_clause 0x1a
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:116
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:112
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:108
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:104
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:100
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:96
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:92
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:88
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:84
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:80
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:76
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:72
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:68
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:64
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:60
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:56
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:52
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:48
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:44
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:40
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:36
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:32
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:28
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:24
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:20
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:16
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:112
+; GFX11-TRUE16-NEXT: s_clause 0x18
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:12
; GFX11-TRUE16-NEXT: s_clause 0x2
; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:8
-; GFX11-TRUE16-NEXT: scratch_load_b32 v85, off, s32 offset:4
-; GFX11-TRUE16-NEXT: scratch_load_b32 v84, off, s32
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_lo16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v99, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v98, off, s32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr140_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr139_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr123_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr107_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr95_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr93_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr75_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr61_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr154_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr122_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr92_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr164_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr166_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr41_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr182_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr181_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
@@ -161838,135 +161836,135 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[15:16]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[11:12]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[7:8]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[5:6]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[23:24]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[19:20]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[27:28]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[13:14]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[11:12]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[7:8]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v13
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v7
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v85
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v85
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v99
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v99
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v84
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[13:14]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[9:10]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[3:4]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[1:2]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[84:85]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v98
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v30
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v30
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[5:6]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[98:99]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[29:30]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[27:28]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[23:24]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[21:22]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[19:20]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[17:18]
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v1.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.h, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v43.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v40.h, v3.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v62.h, v5.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v56.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v47.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v42.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v91.h, v7.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v79.h, v7.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v74.h, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v60.h, v8.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v111.h, v9.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.h, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v89.h, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v138.h, v11.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v108.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v153.h, v13.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v137.h, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.h, v14.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v154.h, v15.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.h, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v152.h, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v16.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v106.h, v9.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v9.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v76.h, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v10.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v127.h, v11.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v11.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v104.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v12.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v142.h, v13.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v13.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v14.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v143.h, v15.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v15.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v141.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v16.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v17.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v17.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v18.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v19.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v19.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v147.h, v20.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v20.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v21.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v22.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v22.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v23.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v24.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v24.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v25.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v26.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v26.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v27.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v28.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v28.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v29.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v30.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v30.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v84.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v84.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v85.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v85.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v98.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v98.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v99.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v99.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5
@@ -161982,148 +161980,153 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99
; GFX11-TRUE16-NEXT: .LBB90_2: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_4
; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v17
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_lshlrev_b32 v31, 16, v18
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_add_f32 v31, 0x40c00000, v31
; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v34, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v31, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v31
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v32, v32, v31, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v31, v38, v34, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v32, v37, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_lshlrev_b32 v17, 16, v17
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_cndmask_b32 v32, v35, v36
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v17, 0x40c00000, v17
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v35, v36, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v17, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v17
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v17, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v32
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v32
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v32
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v37, v49, vcc_lo
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v135, v37, v49, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff0000, v19
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v31, v33, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v20, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v135.h
; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v36, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v148.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v149, v33, v35, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v147, v33, v35, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36
-; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v19
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v31
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v17, v34, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v19, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v149.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v147.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v19, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v33, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v34
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v17, v33, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v20, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v18, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v21
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v34
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v22, 0x40c00000, v22
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_lshlrev_b32 v22, 16, v22
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v22
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v151, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24
-; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v149, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v24
; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v21
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v17, v36, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v151.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v149.h
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v24 :: v_dual_lshlrev_b32 v21, 16, v23
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v23
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v24, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v23
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v36
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v19, v35, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v20, 0x40c00000, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v148.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v36
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v36
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v161, v19, v23, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v33
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v151, v19, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v17, v24, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v151.h
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v25
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v150.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v161.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v23 :: v_dual_lshlrev_b32 v21, 16, v25
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162136,10 +162139,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v33
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v161, v19, v23 :: v_dual_lshlrev_b32 v22, 16, v28
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
@@ -162152,10 +162153,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v27
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v160.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v163.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v161.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v150.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162168,10 +162169,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v35
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v49
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v49
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v35
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
@@ -162184,10 +162185,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v29
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v165.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v38
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v163.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v38
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v38
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162200,14 +162201,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v85
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v51
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v51
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v99
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v51
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v51
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v85
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v99
; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_cndmask_b32 v53, v17, v24
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
@@ -162216,14 +162217,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v84
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v167.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v98
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v160.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v165.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v98
; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_cndmask_b32 v52, v19, v24
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
@@ -162232,10 +162233,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v162.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v53
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v53
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v53
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v53
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v37
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v2
@@ -162248,11 +162249,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v177.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v37
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v167.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v22, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v1
@@ -162265,11 +162265,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_add_f32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v55
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v55
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v55
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v55
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v20, 16, 1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v21, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v4
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
@@ -162282,10 +162282,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v179.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v162.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v177.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v18, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v180, v17, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v19, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
@@ -162300,9 +162301,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v164.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v65
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v183, v2, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v65
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v65
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v2, v19, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v17, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v3
@@ -162312,13 +162313,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v17, 0x7fff
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v183.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v179.h
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v166.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[48:49]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v43, v1, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v164.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v67
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v40, v1, v18, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
@@ -162329,13 +162330,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v43.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[50:51]
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[50:51]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v67
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v67
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v47, v2, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[48:49]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[37:38]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v67
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v42, v2, v17, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v8
@@ -162349,23 +162350,23 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v47.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v62, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v50
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v56, v2, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v5, v6, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v48
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v1, v17, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v2, v4, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v7
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v56.h
; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v74, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v60, v3, v5, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v10
@@ -162379,8 +162380,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v74.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v176.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v60.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v166.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v1, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
@@ -162388,21 +162389,20 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v91, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v79, v4, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v12
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v91.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v89, v1, v4, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v79.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v76, v1, v4 :: v_dual_lshlrev_b32 v1, 16, v9
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v99, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v2, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
@@ -162410,10 +162410,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v178.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.l, v89.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[82:83]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v98, v2, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.l, v76.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v176.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[82:83]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v2, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -162421,29 +162421,29 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[54:55]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[52:53]
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v111, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v106, v2, v3, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v7, v4, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v14
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.l, v111.h
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v108, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.l, v106.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v104, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[98:99]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[96:97]
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v115, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v3, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.l, v108.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v99
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.l, v104.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v112, v2, v3, vcc_lo
; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v4 :: v_dual_add_f32 v3, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
@@ -162452,8 +162452,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v180.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v138, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v178.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v127, v4, v5, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
@@ -162461,19 +162461,19 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v131, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v129, v4, v5, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v137, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v125, v6, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v114.l, v138.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v130, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v40.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v128, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
@@ -162481,11 +162481,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v62.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v137.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v125.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.l, v127.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v153, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v142, v4, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
@@ -162494,83 +162494,82 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v153.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v152, v2, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v142.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v141, v2, v9, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[114:115]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[68:69]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v154, v7, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v42.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[112:113]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v143, v7, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[33:34]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[130:131]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[66:67]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v145, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[66:67]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[68:69]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[64:65]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v134, v4, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v152.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[64:65]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v131
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v144, v2, v3, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v154.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v145
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v145
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v131
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v130
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[144:145]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[37:38]
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v141.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[33:34]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v129
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v129
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v133, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v143.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v134
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v134
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v128
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v113
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[133:134]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[128:129]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[35:36]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v144
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v115
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v115
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v114
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v99
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v98
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v83
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v83
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v82
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v68
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v66
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v65
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v64
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v54
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v52
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v50
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v48
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v133
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v113
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v112
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v97
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v97
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v96
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v83
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v83
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v82
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v68
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v66
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v64
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v54
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v52
; GFX11-TRUE16-NEXT: .LBB90_4: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v180.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v143.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v178.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v152.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v146.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v145.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v65.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.l, v1.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v141.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v139.l
; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v2.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v179.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v142.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v177.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v140.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v134.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v144.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v1
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v67.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v2.l, v2.h
; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.l, v3.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v43.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v140.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v127.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v40.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v138.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v136.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v5, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v133.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v131.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v3.l, v3.h
; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.l, v4.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v183.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v139.l
+; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v179.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v137.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v69.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v121.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v5, v3
@@ -162578,89 +162577,89 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v4.l, v4.h
; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.l, v6.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v62.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v125.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v132.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v56.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v126.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v130.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v83.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v105.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v107.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v6.l, v6.h
; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v47.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v42.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v123.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v98.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v96.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v117.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v5, v6
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v97.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v7.l, v7.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.l, v8.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v91.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v110.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v90.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v114.h
+; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v79.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v111.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v91.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v112.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v7
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v114.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v8.l, v8.h
; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v74.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v107.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v115.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v60.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v109.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v113.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v75.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v5, v8
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v130.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v128.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v9.l, v9.h
; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v111.h
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v106.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v95.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v112.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v131.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v101.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v129.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v5, v9
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v59.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v61.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v10.h
; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v89.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v76.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v93.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v144.h
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v133.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v100.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v5, v10
-; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v145.h
+; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v134.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v11.h
; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v138.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v79.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v44.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v127.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v89.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v45.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v31.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v5, v11
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v118.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.l, v12.h
; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v13.l, v13.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v108.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v77.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v104.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v78.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v32.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v124.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v120.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v5, v12
; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v13.l, v13.h
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v153.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v72.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v142.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v73.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v115.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v34.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v5, v13
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v109.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v105.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v14.l, v14.h
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v137.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v61.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v125.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v63.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v35.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v102.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v5, v14
@@ -162668,71 +162667,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v15.h
; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v154.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v57.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v94.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v143.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v58.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v90.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v37.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v5, v15
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v101.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v86.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v16.l, v16.h
; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v17.l, v17.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v152.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v46.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v141.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v47.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v38.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v78.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v74.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v5, v16
; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v48.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v17.l, v17.h
; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v18.l, v18.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v148.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v136.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v135.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v124.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v85.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v49.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v5, v17
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v63.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v59.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v18.l, v18.h
; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.l, v19.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v126.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v122.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v50.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v86.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v84.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v5, v18
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v51.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v19.l, v19.h
; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v20.l, v20.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v150.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v122.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v56.l
+; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v148.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v110.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v44.l
; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v52.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v5, v19
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v80.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v20.l, v20.h
; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v149.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v120.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v147.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v108.l
; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v53.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v41.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v183.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v5, v20
; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v54.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v21.l, v21.h
; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v22.l, v22.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v160.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v106.l
+; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v150.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v94.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l
; GFX11-TRUE16-NEXT: v_and_b16 v34.l, 0xff, v55.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v5, v21
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v181.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v180.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v22.l, v22.h
; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v23.l, v23.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v151.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v104.l
+; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v149.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v92.l
; GFX11-TRUE16-NEXT: s_clause 0x1
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[6:9], off offset:16
@@ -162740,71 +162739,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v23.l, v23.h
; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.l, v24.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v162.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v92.l
+; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v160.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v88.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v5, v23
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v24.l, v24.h
; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v25.l, v25.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v161.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v88.l
+; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v151.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v77.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v5, v24
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v25.l, v25.h
; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v26.l, v26.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v164.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v76.l
+; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v162.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v72.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v5, v25
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v26.l, v26.h
; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v27.l, v27.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v163.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v73.l
+; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v161.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v62.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v5, v26
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v27.l, v27.h
; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v28.l, v28.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v166.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v60.l
+; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v164.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v57.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v5, v27
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v28.l, v28.h
; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v29.l, v29.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v165.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v58.l
+; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v163.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v46.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v5, v28
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.l, v29.h
; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v30.l, v30.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v176.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v45.l
+; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v166.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v43.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v5, v29
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v30.l, v30.h
; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v31.l, v31.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v167.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v42.l
+; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v165.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v41.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v5, v30
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v31.l, v31.h
; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v32.l, v32.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v178.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v40.l
+; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v176.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v182.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v5, v31
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v32.l, v32.h
; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v33.l, v33.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v177.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v182.l
+; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v167.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v181.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v5, v32
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v33.l, v33.h
@@ -162820,66 +162819,64 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[26:29], off offset:96
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[30:33], off offset:112
; GFX11-TRUE16-NEXT: s_clause 0x1f
-; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:12
-; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:16
-; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:20
-; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:24
-; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:28
-; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:32
-; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:36
-; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:40
-; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:44
-; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:48
-; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:52
-; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:56
-; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:60
-; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:64
-; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:68
-; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:72
-; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:76
-; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:80
-; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:84
-; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:88
-; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:92
-; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:96
-; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:100
-; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:104
-; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:108
-; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:112
-; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:116
-; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:120
-; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:124
-; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:128
-; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:132
-; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:136
-; GFX11-TRUE16-NEXT: s_clause 0x1a
-; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:140
-; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:144
-; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:148
-; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:152
-; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:156
-; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:160
-; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:164
-; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:168
-; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:172
-; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:176
-; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:180
-; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:184
-; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:188
-; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:192
-; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:196
-; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:200
-; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:204
-; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:208
-; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:212
-; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:216
-; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:220
-; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:224
-; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:228
-; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:232
-; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:236
-; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:240
-; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:136
+; GFX11-TRUE16-NEXT: s_clause 0x18
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:236
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -186724,55 +186721,55 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
@@ -186798,24 +186795,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3
@@ -186827,24 +186824,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24]
@@ -186906,24 +186903,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3
@@ -186935,24 +186932,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17
; GFX11-TRUE16-NEXT: .LBB94_4: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -186990,7 +186987,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
@@ -186998,15 +186995,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
@@ -187014,15 +187011,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
@@ -187030,15 +187027,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
@@ -187046,15 +187043,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
@@ -187062,15 +187059,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
@@ -187078,8 +187075,8 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l
; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l
; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15
@@ -187094,15 +187091,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h
@@ -187110,15 +187107,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h
@@ -187126,15 +187123,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h
@@ -187142,15 +187139,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h
@@ -187158,15 +187155,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h
@@ -187174,15 +187171,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h
@@ -209426,55 +209423,55 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
@@ -209500,24 +209497,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3
@@ -209529,24 +209526,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24]
@@ -209608,24 +209605,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3
@@ -209637,24 +209634,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17
; GFX11-TRUE16-NEXT: .LBB98_4: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -209692,7 +209689,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
@@ -209700,15 +209697,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
@@ -209716,15 +209713,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
@@ -209732,15 +209729,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
@@ -209748,15 +209745,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
@@ -209764,15 +209761,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
@@ -209780,8 +209777,8 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l
; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l
; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15
@@ -209796,15 +209793,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h
@@ -209812,15 +209809,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h
@@ -209828,15 +209825,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h
@@ -209844,15 +209841,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h
@@ -209860,15 +209857,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h
@@ -209876,15 +209873,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index e33493c..d3fbba3 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -85072,13 +85072,13 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
@@ -85086,20 +85086,20 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16
@@ -85119,18 +85119,18 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v14
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v13
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v11
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v9
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v7
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v5
@@ -85159,19 +85159,19 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v7.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v8.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v9.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v9.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v9.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v10.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v10.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v11.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v12.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v12.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v13.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v87.h, v14.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v14.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v15.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v15.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v15.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v103.h, v16.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v16.h
@@ -85345,29 +85345,29 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v68.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22]
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v2, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[19:20]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18]
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v2, v3, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v14
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v83.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v82, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v82.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18]
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v80, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
@@ -85384,82 +85384,81 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v23
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v80.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24
; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v27, v2, v3 :: v_dual_add_f32 v2, 0x40c00000, v4
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v82.h
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v4, v5, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-TRUE16-NEXT: v_add3_u32 v6, v8, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v4, v5, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v6, v7, vcc_lo
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v6, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[27:28]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26]
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v96.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v87.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v33
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v33
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v33
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28
-; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v112, v4, v6 :: v_dual_add_f32 v1, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8
-; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v15
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v112.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
-; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15
-; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v27
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v7, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v7, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v4, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v103.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v37, v2, v3, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v114.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v113.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v38
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v38
@@ -85524,7 +85523,7 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v113.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v114.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v98.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v27.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v6
@@ -85541,12 +85540,12 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v8.l, v8.h
; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v83.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v82.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v100.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v30.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v33.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v8
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v80.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v81.l
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v9.l, v9.h
; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h
@@ -85560,14 +85559,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h
; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v97.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v87.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v96.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v69.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v10
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v11.l, v11.h
; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v82.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v80.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v86.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v11
@@ -85581,14 +85580,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v13.l, v13.h
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v96.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v81.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v87.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v83.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v13
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v14.l, v14.h
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v114.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v113.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v71.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v14
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
index 67c9bfe..ecc715c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
@@ -11261,8 +11261,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s5, s17, 24
; VI-NEXT: s_lshr_b32 s8, s17, 16
-; VI-NEXT: s_lshr_b32 s9, s17, 8
-; VI-NEXT: s_lshr_b32 s10, s16, 16
+; VI-NEXT: s_lshr_b32 s10, s17, 8
+; VI-NEXT: s_lshr_b32 s9, s16, 16
; VI-NEXT: s_lshr_b32 s11, s16, 8
; VI-NEXT: s_cbranch_execnz .LBB85_4
; VI-NEXT: .LBB85_2: ; %cmp.true
@@ -11277,9 +11277,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: s_branch .LBB85_5
; VI-NEXT: .LBB85_3:
; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr4
; VI-NEXT: ; implicit-def: $sgpr9
+; VI-NEXT: ; implicit-def: $sgpr4
+; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr5
; VI-NEXT: s_branch .LBB85_2
@@ -11287,8 +11287,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v8, s16
; VI-NEXT: v_mov_b32_e32 v9, s17
; VI-NEXT: v_mov_b32_e32 v1, s11
-; VI-NEXT: v_mov_b32_e32 v2, s10
-; VI-NEXT: v_mov_b32_e32 v5, s9
+; VI-NEXT: v_mov_b32_e32 v2, s9
+; VI-NEXT: v_mov_b32_e32 v5, s10
; VI-NEXT: v_mov_b32_e32 v6, s8
; VI-NEXT: v_mov_b32_e32 v7, s5
; VI-NEXT: v_mov_b32_e32 v3, s4
@@ -11306,8 +11306,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
; GFX9-NEXT: s_lshr_b32 s8, s17, 16
-; GFX9-NEXT: s_lshr_b32 s9, s17, 8
-; GFX9-NEXT: s_lshr_b32 s10, s16, 16
+; GFX9-NEXT: s_lshr_b32 s10, s17, 8
+; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
; GFX9-NEXT: s_cbranch_execnz .LBB85_4
; GFX9-NEXT: .LBB85_2: ; %cmp.true
@@ -11322,9 +11322,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: s_branch .LBB85_5
; GFX9-NEXT: .LBB85_3:
; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr4
; GFX9-NEXT: ; implicit-def: $sgpr9
+; GFX9-NEXT: ; implicit-def: $sgpr4
+; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
; GFX9-NEXT: s_branch .LBB85_2
@@ -11332,8 +11332,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
-; GFX9-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-NEXT: v_mov_b32_e32 v5, s10
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
@@ -11352,8 +11352,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
@@ -11370,16 +11370,16 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11-NEXT: s_branch .LBB85_5
; GFX11-NEXT: .LBB85_3:
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: s_branch .LBB85_2
; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB85_5: ; %end
@@ -13517,8 +13517,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
; GFX9-NEXT: s_lshr_b32 s8, s17, 16
-; GFX9-NEXT: s_lshr_b32 s9, s17, 8
-; GFX9-NEXT: s_lshr_b32 s10, s16, 16
+; GFX9-NEXT: s_lshr_b32 s10, s17, 8
+; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
; GFX9-NEXT: s_cbranch_execnz .LBB97_4
; GFX9-NEXT: .LBB97_2: ; %cmp.true
@@ -13533,9 +13533,9 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: s_branch .LBB97_5
; GFX9-NEXT: .LBB97_3:
; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr4
; GFX9-NEXT: ; implicit-def: $sgpr9
+; GFX9-NEXT: ; implicit-def: $sgpr4
+; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
; GFX9-NEXT: s_branch .LBB97_2
@@ -13543,8 +13543,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
-; GFX9-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-NEXT: v_mov_b32_e32 v5, s10
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
@@ -13563,8 +13563,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
@@ -13581,16 +13581,16 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11-NEXT: s_branch .LBB97_5
; GFX11-NEXT: .LBB97_3:
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: s_branch .LBB97_2
; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB97_5: ; %end
@@ -15345,8 +15345,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
; GFX9-NEXT: s_lshr_b32 s8, s17, 16
-; GFX9-NEXT: s_lshr_b32 s9, s17, 8
-; GFX9-NEXT: s_lshr_b32 s10, s16, 16
+; GFX9-NEXT: s_lshr_b32 s10, s17, 8
+; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
; GFX9-NEXT: s_cbranch_execnz .LBB105_4
; GFX9-NEXT: .LBB105_2: ; %cmp.true
@@ -15362,9 +15362,9 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: s_branch .LBB105_5
; GFX9-NEXT: .LBB105_3:
; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr4
; GFX9-NEXT: ; implicit-def: $sgpr9
+; GFX9-NEXT: ; implicit-def: $sgpr4
+; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
; GFX9-NEXT: s_branch .LBB105_2
@@ -15372,8 +15372,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
-; GFX9-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-NEXT: v_mov_b32_e32 v5, s10
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
@@ -15392,8 +15392,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
@@ -15410,16 +15410,16 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11-NEXT: s_branch .LBB105_5
; GFX11-NEXT: .LBB105_3:
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: s_branch .LBB105_2
; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB105_5: ; %end
@@ -16493,8 +16493,8 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s8, s17, 24
; VI-NEXT: s_lshr_b32 s5, s17, 16
-; VI-NEXT: s_lshr_b32 s9, s17, 8
-; VI-NEXT: s_lshr_b32 s10, s16, 16
+; VI-NEXT: s_lshr_b32 s10, s17, 8
+; VI-NEXT: s_lshr_b32 s9, s16, 16
; VI-NEXT: s_lshr_b32 s11, s16, 8
; VI-NEXT: s_cbranch_execnz .LBB109_4
; VI-NEXT: .LBB109_2: ; %cmp.true
@@ -16546,16 +16546,16 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB109_3:
; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr4
; VI-NEXT: ; implicit-def: $sgpr9
+; VI-NEXT: ; implicit-def: $sgpr4
+; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr5
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: s_branch .LBB109_2
; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v1, s11
-; VI-NEXT: v_mov_b32_e32 v2, s10
-; VI-NEXT: v_mov_b32_e32 v5, s9
+; VI-NEXT: v_mov_b32_e32 v2, s9
+; VI-NEXT: v_mov_b32_e32 v5, s10
; VI-NEXT: v_mov_b32_e32 v7, s8
; VI-NEXT: v_mov_b32_e32 v3, s4
; VI-NEXT: v_mov_b32_e32 v0, s16
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 97df2a0..258bc295 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -5548,7 +5548,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX7LESS: ; %bb.0: ; %entry
; GFX7LESS-NEXT: s_mov_b64 s[6:7], exec
; GFX7LESS-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s4, 0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v4, s7, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
@@ -5557,33 +5556,32 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX7LESS-NEXT: s_cbranch_execz .LBB9_4
; GFX7LESS-NEXT: ; %bb.1:
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s5, s[6:7]
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX7LESS-NEXT: s_mov_b64 s[10:11], 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, s4
; GFX7LESS-NEXT: s_mov_b32 s7, 0xf000
-; GFX7LESS-NEXT: s_mul_i32 s12, s5, 5
+; GFX7LESS-NEXT: s_mul_i32 s12, s6, 5
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s14
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s15
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s4
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s5
; GFX7LESS-NEXT: s_mov_b32 s6, -1
; GFX7LESS-NEXT: s_mov_b32 s4, s2
; GFX7LESS-NEXT: s_mov_b32 s5, s3
; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: v_mov_b32_e32 v9, v1
-; GFX7LESS-NEXT: v_mov_b32_e32 v8, v0
-; GFX7LESS-NEXT: v_subrev_i32_e32 v6, vcc, s12, v8
-; GFX7LESS-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v0
+; GFX7LESS-NEXT: v_subrev_i32_e32 v5, vcc, s12, v7
+; GFX7LESS-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, v6
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, v7
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, v8
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, v9
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v7
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v8
; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: buffer_wbinvl1
-; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX7LESS-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[10:11]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
@@ -5611,39 +5609,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0
-; GFX8-NEXT: s_mov_b32 s4, 0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX8-NEXT: s_cbranch_execz .LBB9_4
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0
-; GFX8-NEXT: s_bcnt1_i32_b64 s5, s[6:7]
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX8-NEXT: s_mov_b64 s[10:11], 0
-; GFX8-NEXT: v_mov_b32_e32 v5, s4
-; GFX8-NEXT: s_mul_i32 s12, s5, 5
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s14
-; GFX8-NEXT: v_mov_b32_e32 v1, s15
; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mul_i32 s12, s6, 5
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s4, s2
; GFX8-NEXT: s_mov_b32 s5, s3
; GFX8-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s12, v8
-; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc
-; GFX8-NEXT: v_mov_b32_e32 v0, v6
-; GFX8-NEXT: v_mov_b32_e32 v1, v7
-; GFX8-NEXT: v_mov_b32_e32 v2, v8
-; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: v_mov_b32_e32 v8, v1
+; GFX8-NEXT: v_mov_b32_e32 v7, v0
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s12, v7
+; GFX8-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
+; GFX8-NEXT: v_mov_b32_e32 v1, v6
+; GFX8-NEXT: v_mov_b32_e32 v2, v7
+; GFX8-NEXT: v_mov_b32_e32 v3, v8
; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX8-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
; GFX8-NEXT: s_andn2_b64 exec, exec, s[10:11]
; GFX8-NEXT: s_cbranch_execnz .LBB9_2
@@ -5670,39 +5666,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX9-NEXT: s_mov_b64 s[6:7], exec
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0
-; GFX9-NEXT: s_mov_b32 s4, 0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX9-NEXT: s_cbranch_execz .LBB9_4
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0
-; GFX9-NEXT: s_bcnt1_i32_b64 s5, s[6:7]
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; GFX9-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX9-NEXT: s_mov_b64 s[10:11], 0
-; GFX9-NEXT: v_mov_b32_e32 v5, s4
-; GFX9-NEXT: s_mul_i32 s12, s5, 5
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s14
-; GFX9-NEXT: v_mov_b32_e32 v1, s15
; GFX9-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-NEXT: s_mul_i32 s12, s6, 5
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s4, s2
; GFX9-NEXT: s_mov_b32 s5, s3
; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_mov_b32_e32 v9, v1
-; GFX9-NEXT: v_mov_b32_e32 v8, v0
-; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s12, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v9, v5, vcc
-; GFX9-NEXT: v_mov_b32_e32 v0, v6
-; GFX9-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-NEXT: v_mov_b32_e32 v2, v8
-; GFX9-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-NEXT: v_mov_b32_e32 v8, v1
+; GFX9-NEXT: v_mov_b32_e32 v7, v0
+; GFX9-NEXT: v_subrev_co_u32_e32 v5, vcc, s12, v7
+; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v8, vcc
+; GFX9-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-NEXT: v_mov_b32_e32 v3, v8
; GFX9-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11]
; GFX9-NEXT: s_cbranch_execnz .LBB9_2
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
index c3b14e8..ca50835 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
@@ -57,8 +57,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB0_1: ; %atomicrmw.start
@@ -69,7 +68,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -96,9 +95,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -106,7 +104,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -123,9 +121,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -133,7 +130,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -150,9 +147,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -160,7 +156,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -245,8 +241,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB1_1: ; %atomicrmw.start
@@ -256,7 +251,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -292,16 +287,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_add_f32_e32 v1, v2, v0
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -318,16 +312,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v1, s20
; GFX7-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_add_f32_e32 v1, v2, v0
; GFX7-NEXT: v_mov_b32_e32 v5, v2
; GFX7-NEXT: v_mov_b32_e32 v4, v1
-; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -468,7 +461,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v9, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
@@ -481,7 +473,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB2_1
@@ -507,7 +498,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB2_4
@@ -556,7 +547,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v9, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -569,7 +559,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_1
; GFX908-NEXT: ; %bb.2:
@@ -594,7 +583,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
@@ -614,7 +603,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -627,7 +615,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_1
; GFX8-NEXT: ; %bb.2:
@@ -652,7 +639,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
@@ -672,7 +659,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -684,7 +670,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB2_1
; GFX7-NEXT: ; %bb.2:
@@ -709,7 +694,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB2_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
@@ -830,8 +815,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
@@ -842,7 +826,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -860,16 +844,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -886,9 +869,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -896,7 +878,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -913,9 +895,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -923,7 +904,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -940,9 +921,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -950,7 +930,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1035,8 +1015,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB4_1: ; %atomicrmw.start
@@ -1046,7 +1025,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -1064,15 +1043,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, s6
; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_add_f32_e32 v2, v3, v0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
@@ -1089,16 +1066,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_add_f32_e32 v1, v2, v0
; GFX908-NEXT: v_mov_b32_e32 v5, v2
; GFX908-NEXT: v_mov_b32_e32 v4, v1
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -1115,16 +1091,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_add_f32_e32 v1, v2, v0
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -1141,16 +1116,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v1, s20
; GFX7-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_add_f32_e32 v1, v2, v0
; GFX7-NEXT: v_mov_b32_e32 v5, v2
; GFX7-NEXT: v_mov_b32_e32 v4, v1
-; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -1223,9 +1197,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -1237,7 +1209,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1255,8 +1227,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB5_1: ; %atomicrmw.start
@@ -1267,7 +1238,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -1285,16 +1256,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1311,9 +1281,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1321,7 +1290,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1338,9 +1307,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1348,7 +1316,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1365,9 +1333,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -1375,7 +1342,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1448,9 +1415,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -1462,7 +1427,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1480,8 +1445,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
@@ -1492,7 +1456,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -1510,16 +1474,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1536,9 +1499,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1546,7 +1508,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1563,9 +1525,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1573,7 +1534,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1590,9 +1551,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -1600,7 +1560,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1673,9 +1633,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote_memory__amdgpu_ignore_denormal_mode:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -1687,7 +1645,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1705,8 +1663,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
@@ -1717,7 +1674,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -1735,16 +1692,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1761,9 +1717,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1771,7 +1726,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1788,9 +1743,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1798,7 +1752,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1815,9 +1769,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -1825,7 +1778,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1883,24 +1836,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1925,25 +1876,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1958,26 +1907,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB8_1
@@ -1999,26 +1947,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB8_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2030,26 +1977,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB8_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2061,26 +2007,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB8_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2092,27 +2037,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB8_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2133,9 +2078,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v2, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v6, s16
; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
@@ -2146,7 +2089,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[4:5]
@@ -2174,9 +2117,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v2, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v6, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
@@ -2187,7 +2128,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -2205,8 +2146,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
+; GFX10-NEXT: v_mov_b32_e32 v6, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start
@@ -2218,7 +2158,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v8, v3
; GFX10-NEXT: v_mov_b32_e32 v7, v2
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -2246,9 +2186,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v2, s20
; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v6, s20
; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -2257,7 +2196,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v9, v4
; GFX908-NEXT: v_mov_b32_e32 v8, v3
; GFX908-NEXT: v_mov_b32_e32 v7, v2
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5]
@@ -2275,9 +2214,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v2, s20
; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v6, s20
; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -2286,7 +2224,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v9, v4
; GFX8-NEXT: v_mov_b32_e32 v8, v3
; GFX8-NEXT: v_mov_b32_e32 v7, v2
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5]
@@ -2304,9 +2242,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v6, s20
; GFX7-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -2315,7 +2252,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX7-NEXT: v_mov_b32_e32 v9, v4
; GFX7-NEXT: v_mov_b32_e32 v8, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v2
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5]
@@ -2373,10 +2310,9 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-NEXT: v_readfirstlane_b32 s4, v9
; GFX12-NEXT: v_readfirstlane_b32 s5, v10
; GFX12-NEXT: v_readfirstlane_b32 s6, v7
@@ -2390,7 +2326,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048
-; GFX12-NEXT: ; implicit-def: $vgpr4
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB10_1
; GFX12-NEXT: ; %bb.2:
@@ -2420,7 +2355,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB10_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2474,22 +2409,21 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s4, v9
; GFX11-NEXT: v_readfirstlane_b32 s5, v10
; GFX11-NEXT: v_readfirstlane_b32 s6, v7
; GFX11-NEXT: v_readfirstlane_b32 s7, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8]
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048
-; GFX11-NEXT: ; implicit-def: $vgpr4
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB10_1
; GFX11-NEXT: ; %bb.2:
@@ -2518,7 +2452,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB10_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2543,7 +2477,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX10-NEXT: v_mov_b32_e32 v7, v2
; GFX10-NEXT: v_mov_b32_e32 v10, v1
; GFX10-NEXT: v_mov_b32_e32 v9, v0
-; GFX10-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
@@ -2556,7 +2489,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX10-NEXT: ; implicit-def: $vgpr4
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -2584,7 +2516,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_4
@@ -2640,7 +2572,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: v_mov_b32_e32 v7, v2
; GFX908-NEXT: v_mov_b32_e32 v10, v1
; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v9
@@ -2653,7 +2584,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2:
@@ -2680,7 +2610,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2704,7 +2634,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: v_mov_b32_e32 v7, v2
; GFX8-NEXT: v_mov_b32_e32 v10, v1
; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v9
@@ -2717,7 +2646,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2:
@@ -2744,7 +2672,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2768,7 +2696,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX7-NEXT: v_mov_b32_e32 v7, v2
; GFX7-NEXT: v_mov_b32_e32 v10, v1
; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_i32_e32 v15, vcc, 0x800, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v9
@@ -2780,7 +2707,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX7-NEXT: ; implicit-def: $vgpr4
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2:
@@ -2807,7 +2733,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2903,24 +2829,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2945,25 +2869,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2978,26 +2900,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -3010,23 +2931,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v4, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
+; GFX90A-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX90A-NEXT: v_mov_b32_e32 v5, v1
-; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x800
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v6, s6
+; GFX90A-NEXT: v_mov_b32_e32 v10, s20
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX90A-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[8:9], v[8:9] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3038,26 +2958,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3069,26 +2988,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3100,26 +3018,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3131,27 +3048,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3173,24 +3090,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -3215,25 +3130,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -3248,26 +3161,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB12_1
@@ -3289,26 +3201,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
; GFX908-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB12_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3320,26 +3231,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
; GFX8-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB12_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3351,26 +3261,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
; GFX7-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB12_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3382,27 +3291,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
; GFX6-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB12_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7028,9 +6937,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -7042,7 +6949,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_add_f16 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -7060,8 +6967,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7072,7 +6978,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: v_pk_add_f16 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -7099,9 +7005,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -7109,7 +7014,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_pk_add_f16 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -7126,9 +7031,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -7138,7 +7042,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -7156,7 +7060,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -7164,7 +7067,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -7181,7 +7084,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -7277,9 +7180,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -7290,7 +7191,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_mov_b32_e32 v4, v1
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -7308,8 +7209,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -7319,7 +7219,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -7355,9 +7255,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -7366,7 +7265,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -7385,7 +7284,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -7393,7 +7291,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -7410,7 +7308,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -7543,7 +7441,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x400, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -7558,7 +7455,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: buffer_load_b32 v8, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-NEXT: ; %bb.2:
@@ -7587,7 +7483,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[6:7], v9, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[6:7], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
@@ -7609,7 +7505,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v9, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -7622,7 +7517,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_1
@@ -7648,7 +7542,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_4
@@ -7697,7 +7591,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v9, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7710,7 +7603,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_1
; GFX908-NEXT: ; %bb.2:
@@ -7735,7 +7627,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
@@ -7755,7 +7647,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7768,7 +7659,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_1
; GFX8-NEXT: ; %bb.2:
@@ -7778,9 +7668,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB21_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_add_f16_sdwa v4, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_add_f16_e32 v6, v8, v5
-; GFX8-NEXT: v_or_b32_e32 v7, v6, v4
+; GFX8-NEXT: v_add_f16_sdwa v6, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v7, v8, v5
+; GFX8-NEXT: v_or_b32_e32 v7, v7, v6
; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: s_mov_b64 s[12:13], exec
; GFX8-NEXT: v_mov_b32_e32 v7, v8
@@ -7795,7 +7685,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
@@ -7815,7 +7705,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7826,39 +7715,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_1
; GFX7-NEXT: ; %bb.2:
; GFX7-NEXT: s_mov_b64 exec, s[6:7]
; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6
-; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9
; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB21_4 Depth 2
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6
; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_add_f32_e32 v6, v6, v10
-; GFX7-NEXT: v_add_f32_e32 v7, v7, v11
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v5
+; GFX7-NEXT: v_add_f32_e32 v7, v7, v10
+; GFX7-NEXT: v_add_f32_e32 v8, v8, v11
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
-; GFX7-NEXT: v_or_b32_e32 v6, v4, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; GFX7-NEXT: v_or_b32_e32 v5, v7, v4
-; GFX7-NEXT: v_mov_b32_e32 v8, v6
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v5, v8, v5
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7870,23 +7758,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB21_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v7
; GFX7-NEXT: v_mov_b32_e32 v1, v5
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -8003,9 +7891,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -8017,7 +7903,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_add_f16 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -8035,8 +7921,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
@@ -8047,7 +7932,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX10-NEXT: v_pk_add_f16 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8065,16 +7950,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8091,9 +7975,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -8101,7 +7984,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX908-NEXT: v_pk_add_f16 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8118,9 +8001,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8130,7 +8012,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8148,7 +8030,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -8156,7 +8037,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -8173,7 +8054,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -8269,9 +8150,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
@@ -8282,7 +8161,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_mov_b32_e32 v4, v1
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -8300,8 +8179,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
@@ -8311,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8329,15 +8207,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, s6
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_add_f16 v2, v3, v0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
@@ -8354,16 +8230,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_pk_add_f16 v1, v2, v0
; GFX908-NEXT: v_mov_b32_e32 v5, v2
; GFX908-NEXT: v_mov_b32_e32 v4, v1
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -8380,9 +8255,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8391,7 +8265,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -8410,7 +8284,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -8418,7 +8291,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -8435,7 +8308,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -8530,9 +8403,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -8544,7 +8415,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_add_f16 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -8562,8 +8433,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB24_1: ; %atomicrmw.start
@@ -8574,7 +8444,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: v_pk_add_f16 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8592,16 +8462,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8618,9 +8487,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -8628,7 +8496,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_pk_add_f16 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8645,9 +8513,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8657,7 +8524,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8675,7 +8542,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -8683,7 +8549,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -8700,7 +8566,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -8796,9 +8662,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
@@ -8809,7 +8673,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_mov_b32_e32 v4, v1
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -8827,8 +8691,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB25_1: ; %atomicrmw.start
@@ -8838,7 +8701,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8856,15 +8719,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, s6
; GFX90A-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_add_f16 v2, v3, v0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
@@ -8881,16 +8742,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_pk_add_f16 v1, v2, v0
; GFX908-NEXT: v_mov_b32_e32 v5, v2
; GFX908-NEXT: v_mov_b32_e32 v4, v1
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -8907,9 +8767,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8918,7 +8777,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -8937,7 +8796,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -8945,7 +8803,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -8962,7 +8820,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -9054,13 +8912,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -9082,7 +8939,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -9097,12 +8954,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
@@ -9131,7 +8987,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -9149,10 +9005,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -9183,7 +9038,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -9202,9 +9057,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -9230,7 +9084,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -9248,13 +9102,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -9275,7 +9128,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -9292,13 +9145,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -9320,7 +9172,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -9337,11 +9189,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -9366,7 +9217,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -9382,7 +9233,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -9391,7 +9241,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -9406,7 +9256,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -9488,13 +9338,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -9515,7 +9364,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -9531,11 +9380,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
@@ -9561,7 +9408,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -9580,11 +9427,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
@@ -9610,7 +9455,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -9629,12 +9474,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -9656,7 +9500,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -9674,13 +9518,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -9700,7 +9543,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -9717,13 +9560,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -9744,7 +9586,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -9761,11 +9603,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -9789,7 +9630,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -9806,7 +9647,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -9815,7 +9655,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
@@ -9830,7 +9670,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -9930,7 +9770,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -9942,40 +9781,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB28_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX942-NEXT: s_movk_i32 s10, 0x7fff
-; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX942-NEXT: s_mov_b32 s11, 0x7060302
; GFX942-NEXT: .LBB28_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB28_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX942-NEXT: v_add_f32_e32 v4, v4, v9
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX942-NEXT: v_add_f32_e32 v6, v6, v10
+; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10
+; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX942-NEXT: s_mov_b64 s[8:9], exec
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX942-NEXT: v_add_f32_e32 v5, v5, v10
-; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10
-; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX942-NEXT: v_add_f32_e32 v7, v7, v5
+; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10
+; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -9988,27 +9826,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB28_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB28_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
@@ -10022,8 +9859,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
; GFX11-TRUE16-NEXT: ; %bb.2:
@@ -10036,28 +9872,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-TRUE16-NEXT: ; Child Loop BB28_4 Depth 2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v8 :: v_dual_add_f32 v4, v4, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, v6, v8 :: v_dual_add_f32 v5, v5, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-TRUE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -10071,14 +9907,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_4
; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -10088,13 +9924,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
@@ -10108,8 +9943,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
; GFX11-FAKE16-NEXT: ; %bb.2:
@@ -10122,28 +9956,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-FAKE16-NEXT: ; Child Loop BB28_4 Depth 2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v9 :: v_dual_add_f32 v4, v4, v8
-; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v6, v6, v9 :: v_dual_add_f32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-FAKE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -10157,14 +9991,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_4
; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -10174,13 +10008,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
@@ -10192,8 +10025,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB28_1
@@ -10205,25 +10037,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB28_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f32_e32 v4, v4, v8
-; GFX10-NEXT: v_add_f32_e32 v5, v5, v9
-; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX10-NEXT: v_add_f32_e32 v5, v5, v8
+; GFX10-NEXT: v_add_f32_e32 v6, v6, v9
+; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -10235,15 +10067,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB28_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -10252,13 +10084,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_cbranch_execnz .LBB28_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -10270,38 +10101,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB28_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX90A-NEXT: s_movk_i32 s14, 0x7fff
-; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX90A-NEXT: s_mov_b32 s15, 0x7060302
; GFX90A-NEXT: .LBB28_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB28_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX90A-NEXT: v_add_f32_e32 v4, v4, v9
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX90A-NEXT: v_add_f32_e32 v5, v5, v10
-; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14
-; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15
+; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX90A-NEXT: v_add_f32_e32 v6, v6, v10
+; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14
+; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX90A-NEXT: v_add_f32_e32 v7, v7, v5
+; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14
+; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -10313,27 +10143,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB28_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB28_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -10345,8 +10174,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB28_1
; GFX908-NEXT: ; %bb.2:
@@ -10360,24 +10188,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB28_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX908-NEXT: v_add_f32_e32 v4, v4, v8
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX908-NEXT: v_add_f32_e32 v5, v5, v9
-; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14
-; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX908-NEXT: v_add_f32_e32 v5, v5, v8
+; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14
+; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX908-NEXT: v_add_f32_e32 v6, v6, v9
+; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14
+; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -10389,27 +10217,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB28_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB28_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -10421,8 +10248,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB28_1
; GFX8-NEXT: ; %bb.2:
@@ -10434,27 +10260,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB28_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX8-NEXT: v_add_f32_e32 v4, v4, v8
-; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX8-NEXT: v_add_f32_e32 v5, v5, v9
-; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
-; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX8-NEXT: v_add_f32_e32 v5, v5, v8
+; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX8-NEXT: v_add_f32_e32 v6, v6, v9
+; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -10466,27 +10292,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB28_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB28_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -10497,36 +10322,35 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB28_1
; GFX7-NEXT: ; %bb.2:
; GFX7-NEXT: s_mov_b64 exec, s[6:7]
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v6
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5
; GFX7-NEXT: .LBB28_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB28_4 Depth 2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v4
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7
-; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v5
-; GFX7-NEXT: v_add_f32_e32 v4, v4, v10
-; GFX7-NEXT: v_add_f32_e32 v6, v6, v9
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_alignbit_b32 v4, v4, v6, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v8
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX7-NEXT: v_add_f32_e32 v8, v8, v11
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX7-NEXT: v_add_f32_e32 v5, v5, v10
+; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX7-NEXT: v_alignbit_b32 v6, v7, v6, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v8, v5, 16
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -10538,23 +10362,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB28_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB28_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v8
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -10658,13 +10482,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -10686,7 +10509,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -10701,12 +10524,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
@@ -10735,7 +10557,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -10753,10 +10575,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -10787,7 +10608,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -10806,9 +10627,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -10834,7 +10654,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -10852,13 +10672,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -10879,7 +10698,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -10896,13 +10715,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -10924,7 +10742,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -10941,11 +10759,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -10970,7 +10787,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -10986,7 +10803,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -10995,7 +10811,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -11010,7 +10826,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -11092,13 +10908,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -11119,7 +10934,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -11135,11 +10950,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
@@ -11165,7 +10978,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -11184,11 +10997,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
@@ -11214,7 +11025,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -11233,12 +11044,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -11260,7 +11070,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -11278,13 +11088,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -11304,7 +11113,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -11321,13 +11130,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -11348,7 +11156,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -11365,11 +11173,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -11393,7 +11200,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -11410,7 +11217,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -11419,7 +11225,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
@@ -11434,7 +11240,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -11517,13 +11323,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -11545,7 +11350,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -11560,12 +11365,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
@@ -11594,7 +11398,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -11612,10 +11416,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -11646,7 +11449,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -11665,9 +11468,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -11693,7 +11495,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -11711,13 +11513,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -11738,7 +11539,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -11755,13 +11556,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -11783,7 +11583,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -11800,11 +11600,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -11829,7 +11628,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -11845,7 +11644,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -11854,7 +11652,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -11869,7 +11667,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -11951,13 +11749,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -11978,7 +11775,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -11994,11 +11791,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
@@ -12024,7 +11819,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -12043,11 +11838,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
@@ -12073,7 +11866,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -12092,12 +11885,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -12119,7 +11911,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -12137,13 +11929,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -12163,7 +11954,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -12180,13 +11971,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -12207,7 +11997,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -12224,11 +12014,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12252,7 +12041,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -12269,7 +12058,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -12278,7 +12066,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
@@ -12293,7 +12081,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -12375,13 +12163,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -12402,7 +12189,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -12418,11 +12205,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
@@ -12448,7 +12233,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -12467,11 +12252,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
@@ -12497,7 +12280,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -12516,12 +12299,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -12543,7 +12325,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -12561,13 +12343,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -12587,7 +12368,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -12604,13 +12385,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -12631,7 +12411,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -12648,11 +12428,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12676,7 +12455,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -12693,7 +12472,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -12702,7 +12480,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
@@ -12717,7 +12495,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -12825,8 +12603,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB34_1: ; %atomicrmw.start
@@ -12837,7 +12614,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -12855,9 +12632,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -12865,7 +12641,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1
@@ -12883,9 +12659,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -12893,7 +12668,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -12910,9 +12685,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12920,7 +12694,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -12937,9 +12711,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -12947,7 +12720,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
index f7a1fb3..1a4140c 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
@@ -37,10 +37,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -49,7 +48,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_max_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -88,10 +87,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -99,7 +97,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -116,10 +114,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -128,7 +125,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_max_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -145,10 +142,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -157,7 +153,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_max_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -212,10 +208,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -223,7 +218,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_max_f32_e32 v0, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -262,17 +257,16 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1
; GFX90A-NEXT: v_max_f32_e32 v0, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -289,10 +283,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -300,7 +293,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_max_f32_e32 v0, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: v_mov_b32_e32 v4, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -317,10 +310,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -328,7 +320,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_max_f32_e32 v0, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: v_mov_b32_e32 v4, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -402,7 +394,6 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -414,22 +405,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_max_f32_e32 v9, v5, v5
+; GFX942-NEXT: v_max_f32_e32 v5, v5, v5
; GFX942-NEXT: .LBB2_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB2_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_max_f32_e32 v4, v7, v7
-; GFX942-NEXT: v_max_f32_e32 v6, v4, v9
+; GFX942-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX942-NEXT: v_max_f32_e32 v8, v6, v5
; GFX942-NEXT: s_mov_b64 s[8:9], exec
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: buffer_wbl2 sc1
; GFX942-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
@@ -443,21 +433,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB2_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -522,7 +512,6 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -534,22 +523,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_max_f32_e32 v9, v5, v5
+; GFX90A-NEXT: v_max_f32_e32 v5, v5, v5
; GFX90A-NEXT: .LBB2_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB2_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_max_f32_e32 v4, v7, v7
-; GFX90A-NEXT: v_max_f32_e32 v6, v4, v9
+; GFX90A-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX90A-NEXT: v_max_f32_e32 v8, v6, v5
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -561,27 +549,26 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -593,8 +580,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_1
; GFX908-NEXT: ; %bb.2:
@@ -605,11 +591,11 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB2_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_max_f32_e32 v4, v6, v6
-; GFX908-NEXT: v_max_f32_e32 v5, v4, v8
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_max_f32_e32 v5, v7, v7
+; GFX908-NEXT: v_max_f32_e32 v6, v5, v8
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -621,27 +607,26 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB2_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -653,8 +638,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_1
; GFX8-NEXT: ; %bb.2:
@@ -665,11 +649,11 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB2_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v6
-; GFX8-NEXT: v_max_f32_e32 v5, v4, v8
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v7
+; GFX8-NEXT: v_max_f32_e32 v6, v5, v8
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -681,21 +665,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB2_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -777,10 +761,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -789,7 +772,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX942-NEXT: v_max_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -804,11 +787,10 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_max_f32 v2, v1, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v3, s16 :: v_dual_max_f32 v2, v1, v1
+; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -819,7 +801,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX11-NEXT: v_max_f32_e32 v4, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -837,11 +819,10 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_max_f32_e32 v2, v1, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -851,7 +832,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: v_max_f32_e32 v4, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -869,10 +850,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -880,7 +860,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -897,10 +877,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -909,7 +888,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_max_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -926,10 +905,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -938,7 +916,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_max_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -955,10 +933,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_mov_b32_e32 v1, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -967,7 +944,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_max_f32_e32 v4, v0, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1035,10 +1012,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -1047,7 +1023,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_max_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1086,10 +1062,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -1097,7 +1072,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1114,10 +1089,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1126,7 +1100,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_max_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1143,10 +1117,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1155,7 +1128,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_max_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1201,29 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1246,30 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1301,30 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB5_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1334,30 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB5_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1397,11 +1360,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v2, s16
; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v6, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -1411,7 +1372,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[4:5]
; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
@@ -1440,11 +1401,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v2, s16
; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v6, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -1454,7 +1413,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1494,9 +1453,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v2, s20
; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v6, s20
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1506,7 +1464,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v9, v2
; GFX908-NEXT: v_mov_b32_e32 v8, v1
; GFX908-NEXT: v_mov_b32_e32 v7, v0
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
@@ -1525,9 +1483,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v2, s20
; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v6, s20
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1537,7 +1494,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v9, v2
; GFX8-NEXT: v_mov_b32_e32 v8, v1
; GFX8-NEXT: v_mov_b32_e32 v7, v0
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
@@ -1583,10 +1540,9 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-NEXT: v_readfirstlane_b32 s4, v9
; GFX12-NEXT: v_readfirstlane_b32 s5, v10
; GFX12-NEXT: v_readfirstlane_b32 s6, v7
@@ -1600,12 +1556,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048
-; GFX12-NEXT: ; implicit-def: $vgpr4
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB7_1
; GFX12-NEXT: ; %bb.2:
; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[5:6], v[5:6]
+; GFX12-NEXT: v_max_num_f64_e32 v[5:6], v[5:6], v[5:6]
; GFX12-NEXT: s_mov_b32 s1, 0
; GFX12-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Loop Header: Depth=1
@@ -1615,7 +1570,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_mov_b32 s2, exec_lo
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[11:12], v[0:1], v[4:5]
+; GFX12-NEXT: v_max_num_f64_e32 v[11:12], v[0:1], v[5:6]
; GFX12-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12
; GFX12-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14
; GFX12-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1
@@ -1632,7 +1587,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB7_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1686,27 +1641,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s4, v9
; GFX11-NEXT: v_readfirstlane_b32 s5, v10
; GFX11-NEXT: v_readfirstlane_b32 s6, v7
; GFX11-NEXT: v_readfirstlane_b32 s7, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8]
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048
-; GFX11-NEXT: ; implicit-def: $vgpr4
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB7_1
; GFX11-NEXT: ; %bb.2:
; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX11-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX11-NEXT: .p2align 6
; GFX11-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Loop Header: Depth=1
@@ -1716,7 +1670,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5]
+; GFX11-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6]
; GFX11-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12
; GFX11-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14
; GFX11-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1
@@ -1732,7 +1686,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB7_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1816,7 +1770,6 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: v_mov_b32_e32 v7, v2
; GFX908-NEXT: v_mov_b32_e32 v10, v1
; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v9
@@ -1829,12 +1782,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_1
; GFX908-NEXT: ; %bb.2:
; GFX908-NEXT: s_mov_b64 exec, s[6:7]
-; GFX908-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX908-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Loop Header: Depth=1
@@ -1842,7 +1794,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14]
; GFX908-NEXT: s_mov_b64 s[12:13], exec
-; GFX908-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5]
+; GFX908-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6]
; GFX908-NEXT: v_mov_b32_e32 v0, v11
; GFX908-NEXT: v_mov_b32_e32 v1, v12
; GFX908-NEXT: v_mov_b32_e32 v2, v13
@@ -1858,7 +1810,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1882,7 +1834,6 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: v_mov_b32_e32 v7, v2
; GFX8-NEXT: v_mov_b32_e32 v10, v1
; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v9
@@ -1895,12 +1846,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_1
; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_mov_b64 exec, s[6:7]
-; GFX8-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX8-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Loop Header: Depth=1
@@ -1908,7 +1858,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14]
; GFX8-NEXT: s_mov_b64 s[12:13], exec
-; GFX8-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5]
+; GFX8-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6]
; GFX8-NEXT: v_mov_b32_e32 v0, v11
; GFX8-NEXT: v_mov_b32_e32 v1, v12
; GFX8-NEXT: v_mov_b32_e32 v2, v13
@@ -1924,7 +1874,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -2008,29 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2053,30 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2088,31 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v2, s20
+; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v8, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX10-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX10-NEXT: v_mov_b32_e32 v0, v2
+; GFX10-NEXT: v_mov_b32_e32 v1, v3
+; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v3, v5
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
+; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB8_1
@@ -2123,27 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, v0
-; GFX90A-NEXT: v_mov_b32_e32 v0, s20
-; GFX90A-NEXT: v_mov_b32_e32 v3, v1
-; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x800
+; GFX90A-NEXT: v_mov_b32_e32 v2, s20
+; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX90A-NEXT: v_mov_b32_e32 v6, s6
+; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX90A-NEXT: v_mov_b32_e32 v8, s20
; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX90A-NEXT: v_max_f64 v[8:9], v[0:1], v[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX90A-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB8_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2153,30 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB8_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2186,30 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB8_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2219,30 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, v0
-; GFX7-NEXT: v_mov_b32_e32 v0, s20
-; GFX7-NEXT: v_mov_b32_e32 v3, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
-; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v8, s20
; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX7-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, v2
+; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_mov_b32_e32 v2, v4
+; GFX7-NEXT: v_mov_b32_e32 v3, v5
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB8_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2252,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v2, v0
-; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v3, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: v_mov_b32_e32 v2, s20
+; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
-; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v8, s6
; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX6-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX6-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX6-NEXT: v_mov_b32_e32 v0, v2
+; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_mov_b32_e32 v2, v4
+; GFX6-NEXT: v_mov_b32_e32 v3, v5
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB8_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2296,29 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2341,30 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2396,30 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB9_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2429,30 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB9_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6146,13 +6064,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s16
+; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_pk_max_num_f16 v2, v1, v1
; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
-; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -6163,7 +6079,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX12-NEXT: v_pk_max_num_f16 v4, v0, v2
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
@@ -6182,10 +6098,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6195,7 +6110,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX942-NEXT: v_pk_max_f16 v4, v0, v2
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6210,12 +6125,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_pk_max_f16 v2, v1, v1
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -6226,7 +6140,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX11-NEXT: v_pk_max_f16 v4, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -6244,11 +6158,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_pk_max_f16 v2, v1, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -6258,7 +6171,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: v_pk_max_f16 v4, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -6276,10 +6189,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6287,7 +6199,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_pk_max_f16 v0, v5, v5
; GFX90A-NEXT: v_pk_max_f16 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6304,10 +6216,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -6316,7 +6227,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_pk_max_f16 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6333,11 +6244,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v1, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -6349,7 +6259,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_or_b32_e32 v5, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -6367,7 +6277,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -6375,7 +6284,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -6392,7 +6301,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -6467,10 +6376,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v1, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
; GFX12-NEXT: v_pk_max_num_f16 v2, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s16
; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6481,7 +6388,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v2
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
@@ -6500,10 +6407,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6512,7 +6418,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX942-NEXT: v_pk_max_f16 v0, v0, v2
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6528,9 +6434,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6541,7 +6446,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -6559,9 +6464,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6572,7 +6476,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX10-NEXT: v_pk_max_f16 v0, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: v_mov_b32_e32 v4, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -6590,17 +6494,16 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1
; GFX90A-NEXT: v_pk_max_f16 v0, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6617,10 +6520,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -6628,7 +6530,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX908-NEXT: v_pk_max_f16 v0, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: v_mov_b32_e32 v4, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6645,11 +6547,10 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v0, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -6660,7 +6561,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -6679,7 +6580,6 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -6687,7 +6587,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -6704,7 +6604,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -6778,7 +6678,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: v_readfirstlane_b32 s4, v0
@@ -6793,8 +6692,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-NEXT: ; implicit-def: $vgpr4
+; GFX12-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB18_1
; GFX12-NEXT: ; %bb.2:
@@ -6805,13 +6703,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: ; =>This Loop Header: Depth=1
; GFX12-NEXT: ; Child Loop BB18_4 Depth 2
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v4, v6, v6
+; GFX12-NEXT: v_pk_max_num_f16 v5, v7, v7
; GFX12-NEXT: s_mov_b32 s2, exec_lo
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v5, v4, v8
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-NEXT: v_pk_max_num_f16 v6, v5, v8
; GFX12-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-NEXT: v_mov_b32_e32 v6, v7
; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-NEXT: v_readfirstlane_b32 s4, v0
@@ -6826,14 +6724,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB18_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX12-NEXT: s_mov_b32 exec_lo, s2
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-NEXT: v_mov_b32_e32 v7, v5
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6841,14 +6739,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_cbranch_execnz .LBB18_3
; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-NEXT: v_mov_b32_e32 v0, v5
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -6860,23 +6757,22 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB18_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_pk_max_f16 v9, v5, v5
+; GFX942-NEXT: v_pk_max_f16 v5, v5, v5
; GFX942-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB18_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v4, v7, v7
+; GFX942-NEXT: v_pk_max_f16 v6, v9, v9
; GFX942-NEXT: s_mov_b64 s[8:9], exec
-; GFX942-NEXT: v_pk_max_f16 v6, v4, v9
+; GFX942-NEXT: v_pk_max_f16 v8, v6, v5
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -6889,27 +6785,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB18_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB18_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
@@ -6923,8 +6818,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
+; GFX11-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB18_1
; GFX11-NEXT: ; %bb.2:
@@ -6935,13 +6829,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX11-NEXT: ; =>This Loop Header: Depth=1
; GFX11-NEXT: ; Child Loop BB18_4 Depth 2
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v4, v6, v6
+; GFX11-NEXT: v_pk_max_f16 v5, v7, v7
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v5, v4, v8
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-NEXT: v_pk_max_f16 v6, v5, v8
; GFX11-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-NEXT: v_mov_b32_e32 v6, v7
; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
@@ -6955,14 +6849,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB18_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX11-NEXT: s_mov_b32 exec_lo, s2
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-NEXT: v_mov_b32_e32 v7, v5
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -6971,13 +6865,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_cbranch_execnz .LBB18_3
; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
@@ -6989,8 +6882,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB18_1
@@ -7001,12 +6893,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB18_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v4, v6, v6
+; GFX10-NEXT: v_pk_max_f16 v5, v7, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_pk_max_f16 v5, v4, v8
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_pk_max_f16 v6, v5, v8
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -7018,15 +6910,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB18_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -7035,13 +6927,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_cbranch_execnz .LBB18_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -7053,22 +6944,21 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_pk_max_f16 v9, v5, v5
+; GFX90A-NEXT: v_pk_max_f16 v5, v5, v5
; GFX90A-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB18_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v4, v7, v7
-; GFX90A-NEXT: v_pk_max_f16 v6, v4, v9
+; GFX90A-NEXT: v_pk_max_f16 v6, v9, v9
+; GFX90A-NEXT: v_pk_max_f16 v8, v6, v5
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -7080,27 +6970,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7112,8 +7001,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB18_1
; GFX908-NEXT: ; %bb.2:
@@ -7124,11 +7012,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB18_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v4, v6, v6
-; GFX908-NEXT: v_pk_max_f16 v5, v4, v8
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_pk_max_f16 v5, v7, v7
+; GFX908-NEXT: v_pk_max_f16 v6, v5, v8
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7140,27 +7028,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB18_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB18_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7172,8 +7059,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB18_1
; GFX8-NEXT: ; %bb.2:
@@ -7185,14 +7071,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB18_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v6, v6
-; GFX8-NEXT: v_max_f16_sdwa v4, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v5, v5, v9
-; GFX8-NEXT: v_or_b32_e32 v5, v5, v4
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_max_f16_sdwa v5, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v6, v7, v7
+; GFX8-NEXT: v_max_f16_sdwa v5, v5, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v6, v6, v9
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v5
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7204,27 +7090,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB18_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB18_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7235,39 +7120,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB18_1
; GFX7-NEXT: ; %bb.2:
; GFX7-NEXT: s_mov_b64 exec, s[6:7]
; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6
-; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9
; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB18_4 Depth 2
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6
; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_max_f32_e32 v6, v6, v10
-; GFX7-NEXT: v_max_f32_e32 v7, v7, v11
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v5
+; GFX7-NEXT: v_max_f32_e32 v7, v7, v10
+; GFX7-NEXT: v_max_f32_e32 v8, v8, v11
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
-; GFX7-NEXT: v_or_b32_e32 v6, v4, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; GFX7-NEXT: v_or_b32_e32 v5, v7, v4
-; GFX7-NEXT: v_mov_b32_e32 v8, v6
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v5, v8, v5
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7279,23 +7163,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB18_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB18_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v7
; GFX7-NEXT: v_mov_b32_e32 v1, v5
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -7396,13 +7280,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -7431,7 +7313,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
@@ -7452,11 +7334,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7487,7 +7367,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
@@ -7506,13 +7386,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -7534,7 +7413,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -7549,12 +7428,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7583,7 +7461,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -7601,10 +7479,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -7635,7 +7512,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -7654,9 +7531,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -7682,7 +7558,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -7700,13 +7576,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -7727,7 +7602,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -7744,13 +7619,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -7772,7 +7646,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -7789,11 +7663,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -7818,7 +7691,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -7834,7 +7707,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -7843,7 +7715,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -7858,7 +7730,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: v_alignbit_b32 v0, v0, v6, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -7928,11 +7800,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -7958,7 +7828,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
@@ -7980,11 +7850,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
-; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
@@ -8010,7 +7878,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
@@ -8029,13 +7897,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -8056,7 +7923,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -8072,11 +7939,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -8102,7 +7967,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -8121,11 +7986,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -8151,7 +8014,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -8170,12 +8033,11 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -8197,7 +8059,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8215,13 +8077,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -8241,7 +8102,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -8258,13 +8119,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -8285,7 +8145,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -8302,11 +8162,10 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8330,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -8347,7 +8206,6 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -8356,7 +8214,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
@@ -8371,7 +8229,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: v_alignbit_b32 v3, v3, v6, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -8440,7 +8298,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8455,8 +8312,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX12-TRUE16-NEXT: ; %bb.2:
@@ -8468,30 +8324,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v8 :: v_dual_max_num_f32 v4, v4, v9
-; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v6, v6, v8 :: v_dual_max_num_f32 v5, v5, v9
+; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8506,14 +8362,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8521,7 +8377,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3
; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -8532,7 +8388,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8547,8 +8402,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX12-FAKE16-NEXT: ; %bb.2:
@@ -8560,30 +8414,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v9 :: v_dual_max_num_f32 v4, v4, v8
-; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v6, v6, v9 :: v_dual_max_num_f32 v5, v5, v8
+; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX12-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8598,14 +8452,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8613,14 +8467,13 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3
; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -8632,40 +8485,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB21_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX942-NEXT: s_movk_i32 s10, 0x7fff
-; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX942-NEXT: s_mov_b32 s11, 0x7060302
; GFX942-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB21_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX942-NEXT: v_max_f32_e32 v4, v4, v9
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX942-NEXT: v_max_f32_e32 v6, v6, v10
+; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10
+; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX942-NEXT: s_mov_b64 s[8:9], exec
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX942-NEXT: v_max_f32_e32 v5, v5, v10
-; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10
-; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX942-NEXT: v_max_f32_e32 v7, v7, v5
+; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10
+; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -8678,27 +8530,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB21_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB21_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8712,8 +8563,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-TRUE16-NEXT: ; %bb.2:
@@ -8726,28 +8576,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v8 :: v_dual_max_f32 v4, v4, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_max_f32 v6, v6, v8 :: v_dual_max_f32 v5, v5, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8761,14 +8611,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -8778,13 +8628,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8798,8 +8647,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-FAKE16-NEXT: ; %bb.2:
@@ -8812,28 +8660,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v9 :: v_dual_max_f32 v4, v4, v8
-; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_max_f32 v6, v6, v9 :: v_dual_max_f32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8847,14 +8695,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -8864,13 +8712,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8882,8 +8729,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_1
@@ -8895,25 +8741,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB21_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_max_f32_e32 v4, v4, v8
-; GFX10-NEXT: v_max_f32_e32 v5, v5, v9
-; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX10-NEXT: v_max_f32_e32 v5, v5, v8
+; GFX10-NEXT: v_max_f32_e32 v6, v6, v9
+; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -8925,15 +8771,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -8942,13 +8788,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_cbranch_execnz .LBB21_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -8960,38 +8805,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX90A-NEXT: s_movk_i32 s14, 0x7fff
-; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX90A-NEXT: s_mov_b32 s15, 0x7060302
; GFX90A-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB21_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX90A-NEXT: v_max_f32_e32 v4, v4, v9
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX90A-NEXT: v_max_f32_e32 v5, v5, v10
-; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14
-; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15
+; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX90A-NEXT: v_max_f32_e32 v6, v6, v10
+; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14
+; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX90A-NEXT: v_max_f32_e32 v7, v7, v5
+; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14
+; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -9003,27 +8847,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -9035,8 +8878,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_1
; GFX908-NEXT: ; %bb.2:
@@ -9050,24 +8892,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB21_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX908-NEXT: v_max_f32_e32 v4, v4, v8
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX908-NEXT: v_max_f32_e32 v5, v5, v9
-; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14
-; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX908-NEXT: v_max_f32_e32 v5, v5, v8
+; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14
+; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX908-NEXT: v_max_f32_e32 v6, v6, v9
+; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14
+; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -9079,27 +8921,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB21_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -9111,8 +8952,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_1
; GFX8-NEXT: ; %bb.2:
@@ -9124,27 +8964,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB21_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v8
-; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX8-NEXT: v_max_f32_e32 v5, v5, v9
-; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
-; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX8-NEXT: v_max_f32_e32 v5, v5, v8
+; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX8-NEXT: v_max_f32_e32 v6, v6, v9
+; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -9156,27 +8996,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB21_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -9187,8 +9026,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_1
; GFX7-NEXT: ; %bb.2:
@@ -9196,27 +9034,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v6
-; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5
; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB21_4 Depth 2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v4
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v5
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v7
-; GFX7-NEXT: v_max_f32_e32 v4, v4, v9
-; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v8
+; GFX7-NEXT: v_max_f32_e32 v5, v5, v10
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_max_f32_e32 v7, v7, v10
-; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16
-; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_max_f32_e32 v8, v8, v11
+; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v5, v8, 16
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -9228,23 +9066,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB21_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v8
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -9353,10 +9191,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -9365,7 +9202,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_max_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc0 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -9404,10 +9241,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -9416,7 +9252,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1
@@ -9434,10 +9270,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -9446,7 +9281,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_max_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -9463,10 +9298,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -9475,7 +9309,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_max_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
index 8ac6353..671f42c 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
@@ -37,10 +37,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -49,7 +48,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_min_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -88,10 +87,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -99,7 +97,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -116,10 +114,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -128,7 +125,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_min_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -145,10 +142,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -157,7 +153,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_min_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -212,10 +208,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -223,7 +218,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_min_f32_e32 v0, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -262,17 +257,16 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1
; GFX90A-NEXT: v_min_f32_e32 v0, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -289,10 +283,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -300,7 +293,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_min_f32_e32 v0, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: v_mov_b32_e32 v4, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -317,10 +310,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -328,7 +320,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_min_f32_e32 v0, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: v_mov_b32_e32 v4, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -402,7 +394,6 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -414,22 +405,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_max_f32_e32 v9, v5, v5
+; GFX942-NEXT: v_max_f32_e32 v5, v5, v5
; GFX942-NEXT: .LBB2_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB2_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_max_f32_e32 v4, v7, v7
-; GFX942-NEXT: v_min_f32_e32 v6, v4, v9
+; GFX942-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX942-NEXT: v_min_f32_e32 v8, v6, v5
; GFX942-NEXT: s_mov_b64 s[8:9], exec
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: buffer_wbl2 sc1
; GFX942-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
@@ -443,21 +433,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB2_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -522,7 +512,6 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -534,22 +523,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_max_f32_e32 v9, v5, v5
+; GFX90A-NEXT: v_max_f32_e32 v5, v5, v5
; GFX90A-NEXT: .LBB2_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB2_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_max_f32_e32 v4, v7, v7
-; GFX90A-NEXT: v_min_f32_e32 v6, v4, v9
+; GFX90A-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX90A-NEXT: v_min_f32_e32 v8, v6, v5
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -561,27 +549,26 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -593,8 +580,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_1
; GFX908-NEXT: ; %bb.2:
@@ -605,11 +591,11 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB2_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_max_f32_e32 v4, v6, v6
-; GFX908-NEXT: v_min_f32_e32 v5, v4, v8
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_max_f32_e32 v5, v7, v7
+; GFX908-NEXT: v_min_f32_e32 v6, v5, v8
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -621,27 +607,26 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB2_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -653,8 +638,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_1
; GFX8-NEXT: ; %bb.2:
@@ -665,11 +649,11 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB2_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v6
-; GFX8-NEXT: v_min_f32_e32 v5, v4, v8
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v7
+; GFX8-NEXT: v_min_f32_e32 v6, v5, v8
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -681,21 +665,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB2_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -777,10 +761,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -789,7 +772,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX942-NEXT: v_min_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -804,11 +787,10 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_max_f32 v2, v1, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v3, s16 :: v_dual_max_f32 v2, v1, v1
+; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -819,7 +801,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX11-NEXT: v_min_f32_e32 v4, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -837,11 +819,10 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_max_f32_e32 v2, v1, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -851,7 +832,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: v_min_f32_e32 v4, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -869,10 +850,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -880,7 +860,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -897,10 +877,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -909,7 +888,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_min_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -926,10 +905,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -938,7 +916,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_min_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -955,10 +933,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_mov_b32_e32 v1, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -967,7 +944,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_min_f32_e32 v4, v0, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1035,10 +1012,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -1047,7 +1023,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_min_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1086,10 +1062,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -1097,7 +1072,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1114,10 +1089,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1126,7 +1100,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_min_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1143,10 +1117,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1155,7 +1128,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_min_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1201,29 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1246,30 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1301,30 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB5_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1334,30 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB5_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1397,11 +1360,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v2, s16
; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v6, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -1411,7 +1372,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[4:5]
; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
@@ -1440,11 +1401,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v2, s16
; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v6, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -1454,7 +1413,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1494,9 +1453,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v2, s20
; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v6, s20
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1506,7 +1464,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v9, v2
; GFX908-NEXT: v_mov_b32_e32 v8, v1
; GFX908-NEXT: v_mov_b32_e32 v7, v0
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
@@ -1525,9 +1483,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v2, s20
; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v6, s20
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1537,7 +1494,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v9, v2
; GFX8-NEXT: v_mov_b32_e32 v8, v1
; GFX8-NEXT: v_mov_b32_e32 v7, v0
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
@@ -1583,10 +1540,9 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-NEXT: v_readfirstlane_b32 s4, v9
; GFX12-NEXT: v_readfirstlane_b32 s5, v10
; GFX12-NEXT: v_readfirstlane_b32 s6, v7
@@ -1600,12 +1556,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048
-; GFX12-NEXT: ; implicit-def: $vgpr4
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB7_1
; GFX12-NEXT: ; %bb.2:
; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[5:6], v[5:6]
+; GFX12-NEXT: v_max_num_f64_e32 v[5:6], v[5:6], v[5:6]
; GFX12-NEXT: s_mov_b32 s1, 0
; GFX12-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Loop Header: Depth=1
@@ -1615,7 +1570,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_mov_b32 s2, exec_lo
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f64_e32 v[11:12], v[0:1], v[4:5]
+; GFX12-NEXT: v_min_num_f64_e32 v[11:12], v[0:1], v[5:6]
; GFX12-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12
; GFX12-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14
; GFX12-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1
@@ -1632,7 +1587,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB7_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1686,27 +1641,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s4, v9
; GFX11-NEXT: v_readfirstlane_b32 s5, v10
; GFX11-NEXT: v_readfirstlane_b32 s6, v7
; GFX11-NEXT: v_readfirstlane_b32 s7, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8]
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048
-; GFX11-NEXT: ; implicit-def: $vgpr4
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB7_1
; GFX11-NEXT: ; %bb.2:
; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX11-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX11-NEXT: .p2align 6
; GFX11-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Loop Header: Depth=1
@@ -1716,7 +1670,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5]
+; GFX11-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6]
; GFX11-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12
; GFX11-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14
; GFX11-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1
@@ -1732,7 +1686,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB7_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1816,7 +1770,6 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: v_mov_b32_e32 v7, v2
; GFX908-NEXT: v_mov_b32_e32 v10, v1
; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v9
@@ -1829,12 +1782,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_1
; GFX908-NEXT: ; %bb.2:
; GFX908-NEXT: s_mov_b64 exec, s[6:7]
-; GFX908-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX908-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Loop Header: Depth=1
@@ -1842,7 +1794,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14]
; GFX908-NEXT: s_mov_b64 s[12:13], exec
-; GFX908-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5]
+; GFX908-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6]
; GFX908-NEXT: v_mov_b32_e32 v0, v11
; GFX908-NEXT: v_mov_b32_e32 v1, v12
; GFX908-NEXT: v_mov_b32_e32 v2, v13
@@ -1858,7 +1810,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1882,7 +1834,6 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: v_mov_b32_e32 v7, v2
; GFX8-NEXT: v_mov_b32_e32 v10, v1
; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v9
@@ -1895,12 +1846,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_1
; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_mov_b64 exec, s[6:7]
-; GFX8-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX8-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Loop Header: Depth=1
@@ -1908,7 +1858,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14]
; GFX8-NEXT: s_mov_b64 s[12:13], exec
-; GFX8-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5]
+; GFX8-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6]
; GFX8-NEXT: v_mov_b32_e32 v0, v11
; GFX8-NEXT: v_mov_b32_e32 v1, v12
; GFX8-NEXT: v_mov_b32_e32 v2, v13
@@ -1924,7 +1874,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -2008,29 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2053,30 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2088,31 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v2, s20
+; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v8, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX10-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX10-NEXT: v_mov_b32_e32 v0, v2
+; GFX10-NEXT: v_mov_b32_e32 v1, v3
+; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v3, v5
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
+; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB8_1
@@ -2123,27 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, v0
-; GFX90A-NEXT: v_mov_b32_e32 v0, s20
-; GFX90A-NEXT: v_mov_b32_e32 v3, v1
-; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x800
+; GFX90A-NEXT: v_mov_b32_e32 v2, s20
+; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX90A-NEXT: v_mov_b32_e32 v6, s6
+; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX90A-NEXT: v_mov_b32_e32 v8, s20
; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX90A-NEXT: v_min_f64 v[8:9], v[0:1], v[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX90A-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB8_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2153,30 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB8_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2186,30 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB8_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2219,30 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, v0
-; GFX7-NEXT: v_mov_b32_e32 v0, s20
-; GFX7-NEXT: v_mov_b32_e32 v3, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
-; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v8, s20
; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX7-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX7-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, v2
+; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_mov_b32_e32 v2, v4
+; GFX7-NEXT: v_mov_b32_e32 v3, v5
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB8_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2252,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v2, v0
-; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v3, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: v_mov_b32_e32 v2, s20
+; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
-; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v8, s6
; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX6-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX6-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX6-NEXT: v_mov_b32_e32 v0, v2
+; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_mov_b32_e32 v2, v4
+; GFX6-NEXT: v_mov_b32_e32 v3, v5
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB8_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2296,29 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2341,30 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2396,30 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB9_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2429,30 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB9_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6146,13 +6064,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s16
+; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_pk_max_num_f16 v2, v1, v1
; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
-; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -6163,7 +6079,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX12-NEXT: v_pk_min_num_f16 v4, v0, v2
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
@@ -6182,10 +6098,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6195,7 +6110,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX942-NEXT: v_pk_min_f16 v4, v0, v2
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6210,12 +6125,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_pk_max_f16 v2, v1, v1
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -6226,7 +6140,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX11-NEXT: v_pk_min_f16 v4, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -6244,11 +6158,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_pk_max_f16 v2, v1, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -6258,7 +6171,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: v_pk_min_f16 v4, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -6276,10 +6189,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6287,7 +6199,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_pk_max_f16 v0, v5, v5
; GFX90A-NEXT: v_pk_min_f16 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6304,10 +6216,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -6316,7 +6227,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_pk_min_f16 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6333,11 +6244,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v1, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -6349,7 +6259,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_or_b32_e32 v5, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -6367,7 +6277,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -6375,7 +6284,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -6392,7 +6301,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -6467,10 +6376,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v1, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
; GFX12-NEXT: v_pk_max_num_f16 v2, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s16
; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6481,7 +6388,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v2
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
@@ -6500,10 +6407,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6512,7 +6418,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX942-NEXT: v_pk_min_f16 v0, v0, v2
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6528,9 +6434,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6541,7 +6446,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -6559,9 +6464,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6572,7 +6476,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX10-NEXT: v_pk_min_f16 v0, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: v_mov_b32_e32 v4, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -6590,17 +6494,16 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1
; GFX90A-NEXT: v_pk_min_f16 v0, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6617,10 +6520,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -6628,7 +6530,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX908-NEXT: v_pk_min_f16 v0, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: v_mov_b32_e32 v4, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6645,11 +6547,10 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v0, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -6660,7 +6561,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -6679,7 +6580,6 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -6687,7 +6587,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -6704,7 +6604,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -6778,7 +6678,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: v_readfirstlane_b32 s4, v0
@@ -6793,8 +6692,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-NEXT: ; implicit-def: $vgpr4
+; GFX12-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB18_1
; GFX12-NEXT: ; %bb.2:
@@ -6805,13 +6703,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: ; =>This Loop Header: Depth=1
; GFX12-NEXT: ; Child Loop BB18_4 Depth 2
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v4, v6, v6
+; GFX12-NEXT: v_pk_max_num_f16 v5, v7, v7
; GFX12-NEXT: s_mov_b32 s2, exec_lo
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v5, v4, v8
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-NEXT: v_pk_min_num_f16 v6, v5, v8
; GFX12-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-NEXT: v_mov_b32_e32 v6, v7
; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-NEXT: v_readfirstlane_b32 s4, v0
@@ -6826,14 +6724,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB18_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX12-NEXT: s_mov_b32 exec_lo, s2
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-NEXT: v_mov_b32_e32 v7, v5
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6841,14 +6739,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_cbranch_execnz .LBB18_3
; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-NEXT: v_mov_b32_e32 v0, v5
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -6860,23 +6757,22 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB18_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_pk_max_f16 v9, v5, v5
+; GFX942-NEXT: v_pk_max_f16 v5, v5, v5
; GFX942-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB18_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v4, v7, v7
+; GFX942-NEXT: v_pk_max_f16 v6, v9, v9
; GFX942-NEXT: s_mov_b64 s[8:9], exec
-; GFX942-NEXT: v_pk_min_f16 v6, v4, v9
+; GFX942-NEXT: v_pk_min_f16 v8, v6, v5
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -6889,27 +6785,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB18_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB18_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
@@ -6923,8 +6818,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
+; GFX11-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB18_1
; GFX11-NEXT: ; %bb.2:
@@ -6935,13 +6829,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX11-NEXT: ; =>This Loop Header: Depth=1
; GFX11-NEXT: ; Child Loop BB18_4 Depth 2
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v4, v6, v6
+; GFX11-NEXT: v_pk_max_f16 v5, v7, v7
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16 v5, v4, v8
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-NEXT: v_pk_min_f16 v6, v5, v8
; GFX11-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-NEXT: v_mov_b32_e32 v6, v7
; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
@@ -6955,14 +6849,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB18_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX11-NEXT: s_mov_b32 exec_lo, s2
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-NEXT: v_mov_b32_e32 v7, v5
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -6971,13 +6865,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_cbranch_execnz .LBB18_3
; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
@@ -6989,8 +6882,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB18_1
@@ -7001,12 +6893,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB18_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v4, v6, v6
+; GFX10-NEXT: v_pk_max_f16 v5, v7, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_pk_min_f16 v5, v4, v8
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_pk_min_f16 v6, v5, v8
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -7018,15 +6910,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB18_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -7035,13 +6927,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_cbranch_execnz .LBB18_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -7053,22 +6944,21 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_pk_max_f16 v9, v5, v5
+; GFX90A-NEXT: v_pk_max_f16 v5, v5, v5
; GFX90A-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB18_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v4, v7, v7
-; GFX90A-NEXT: v_pk_min_f16 v6, v4, v9
+; GFX90A-NEXT: v_pk_max_f16 v6, v9, v9
+; GFX90A-NEXT: v_pk_min_f16 v8, v6, v5
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -7080,27 +6970,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7112,8 +7001,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB18_1
; GFX908-NEXT: ; %bb.2:
@@ -7124,11 +7012,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB18_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v4, v6, v6
-; GFX908-NEXT: v_pk_min_f16 v5, v4, v8
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_pk_max_f16 v5, v7, v7
+; GFX908-NEXT: v_pk_min_f16 v6, v5, v8
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7140,27 +7028,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB18_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB18_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7172,8 +7059,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB18_1
; GFX8-NEXT: ; %bb.2:
@@ -7185,14 +7071,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB18_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v6, v6
-; GFX8-NEXT: v_min_f16_sdwa v4, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f16_e32 v5, v5, v9
-; GFX8-NEXT: v_or_b32_e32 v5, v5, v4
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_max_f16_sdwa v5, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v6, v7, v7
+; GFX8-NEXT: v_min_f16_sdwa v5, v5, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f16_e32 v6, v6, v9
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v5
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7204,27 +7090,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB18_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB18_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7235,39 +7120,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB18_1
; GFX7-NEXT: ; %bb.2:
; GFX7-NEXT: s_mov_b64 exec, s[6:7]
; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6
-; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9
; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB18_4 Depth 2
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6
; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_min_f32_e32 v6, v6, v10
-; GFX7-NEXT: v_min_f32_e32 v7, v7, v11
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v5
+; GFX7-NEXT: v_min_f32_e32 v7, v7, v10
+; GFX7-NEXT: v_min_f32_e32 v8, v8, v11
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
-; GFX7-NEXT: v_or_b32_e32 v6, v4, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; GFX7-NEXT: v_or_b32_e32 v5, v7, v4
-; GFX7-NEXT: v_mov_b32_e32 v8, v6
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v5, v8, v5
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7279,23 +7163,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB18_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB18_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v7
; GFX7-NEXT: v_mov_b32_e32 v1, v5
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -7396,13 +7280,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -7431,7 +7313,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
@@ -7452,11 +7334,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7487,7 +7367,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
@@ -7506,13 +7386,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -7534,7 +7413,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -7549,12 +7428,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7583,7 +7461,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -7601,10 +7479,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -7635,7 +7512,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -7654,9 +7531,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -7682,7 +7558,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -7700,13 +7576,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -7727,7 +7602,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -7744,13 +7619,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -7772,7 +7646,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -7789,11 +7663,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -7818,7 +7691,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -7834,7 +7707,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -7843,7 +7715,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -7858,7 +7730,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: v_alignbit_b32 v0, v0, v6, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -7928,11 +7800,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -7958,7 +7828,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
@@ -7980,11 +7850,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
-; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
@@ -8010,7 +7878,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
@@ -8029,13 +7897,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -8056,7 +7923,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -8072,11 +7939,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -8102,7 +7967,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -8121,11 +7986,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -8151,7 +8014,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -8170,12 +8033,11 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -8197,7 +8059,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8215,13 +8077,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -8241,7 +8102,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -8258,13 +8119,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -8285,7 +8145,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -8302,11 +8162,10 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8330,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -8347,7 +8206,6 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -8356,7 +8214,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
@@ -8371,7 +8229,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: v_alignbit_b32 v3, v3, v6, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -8440,7 +8298,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8455,8 +8312,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX12-TRUE16-NEXT: ; %bb.2:
@@ -8468,30 +8324,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v8 :: v_dual_min_num_f32 v4, v4, v9
-; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v6, v6, v8 :: v_dual_min_num_f32 v5, v5, v9
+; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8506,14 +8362,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8521,7 +8377,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3
; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -8532,7 +8388,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8547,8 +8402,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX12-FAKE16-NEXT: ; %bb.2:
@@ -8560,30 +8414,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v9 :: v_dual_min_num_f32 v4, v4, v8
-; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v6, v6, v9 :: v_dual_min_num_f32 v5, v5, v8
+; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX12-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8598,14 +8452,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8613,14 +8467,13 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3
; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -8632,40 +8485,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB21_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX942-NEXT: s_movk_i32 s10, 0x7fff
-; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX942-NEXT: s_mov_b32 s11, 0x7060302
; GFX942-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB21_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX942-NEXT: v_min_f32_e32 v4, v4, v9
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX942-NEXT: v_min_f32_e32 v6, v6, v10
+; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10
+; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX942-NEXT: s_mov_b64 s[8:9], exec
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX942-NEXT: v_min_f32_e32 v5, v5, v10
-; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10
-; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX942-NEXT: v_min_f32_e32 v7, v7, v5
+; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10
+; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -8678,27 +8530,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB21_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB21_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8712,8 +8563,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-TRUE16-NEXT: ; %bb.2:
@@ -8726,28 +8576,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v8 :: v_dual_min_f32 v4, v4, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_min_f32 v6, v6, v8 :: v_dual_min_f32 v5, v5, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8761,14 +8611,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -8778,13 +8628,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8798,8 +8647,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-FAKE16-NEXT: ; %bb.2:
@@ -8812,28 +8660,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v9 :: v_dual_min_f32 v4, v4, v8
-; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_min_f32 v6, v6, v9 :: v_dual_min_f32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8847,14 +8695,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -8864,13 +8712,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8882,8 +8729,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_1
@@ -8895,25 +8741,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB21_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_min_f32_e32 v4, v4, v8
-; GFX10-NEXT: v_min_f32_e32 v5, v5, v9
-; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX10-NEXT: v_min_f32_e32 v5, v5, v8
+; GFX10-NEXT: v_min_f32_e32 v6, v6, v9
+; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -8925,15 +8771,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -8942,13 +8788,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_cbranch_execnz .LBB21_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -8960,38 +8805,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX90A-NEXT: s_movk_i32 s14, 0x7fff
-; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX90A-NEXT: s_mov_b32 s15, 0x7060302
; GFX90A-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB21_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX90A-NEXT: v_min_f32_e32 v4, v4, v9
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX90A-NEXT: v_min_f32_e32 v5, v5, v10
-; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14
-; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15
+; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX90A-NEXT: v_min_f32_e32 v6, v6, v10
+; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14
+; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX90A-NEXT: v_min_f32_e32 v7, v7, v5
+; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14
+; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -9003,27 +8847,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -9035,8 +8878,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_1
; GFX908-NEXT: ; %bb.2:
@@ -9050,24 +8892,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB21_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX908-NEXT: v_min_f32_e32 v4, v4, v8
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX908-NEXT: v_min_f32_e32 v5, v5, v9
-; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14
-; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX908-NEXT: v_min_f32_e32 v5, v5, v8
+; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14
+; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX908-NEXT: v_min_f32_e32 v6, v6, v9
+; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14
+; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -9079,27 +8921,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB21_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -9111,8 +8952,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_1
; GFX8-NEXT: ; %bb.2:
@@ -9124,27 +8964,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB21_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX8-NEXT: v_min_f32_e32 v4, v4, v8
-; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX8-NEXT: v_min_f32_e32 v5, v5, v9
-; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
-; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX8-NEXT: v_min_f32_e32 v5, v5, v8
+; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX8-NEXT: v_min_f32_e32 v6, v6, v9
+; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -9156,27 +8996,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB21_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -9187,8 +9026,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_1
; GFX7-NEXT: ; %bb.2:
@@ -9196,27 +9034,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v6
-; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5
; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB21_4 Depth 2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v4
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v5
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v7
-; GFX7-NEXT: v_min_f32_e32 v4, v4, v9
-; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v8
+; GFX7-NEXT: v_min_f32_e32 v5, v5, v10
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_min_f32_e32 v7, v7, v10
-; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16
-; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_min_f32_e32 v8, v8, v11
+; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v5, v8, 16
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -9228,23 +9066,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB21_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v8
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -9353,10 +9191,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -9365,7 +9202,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_min_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc0 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -9404,10 +9241,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -9416,7 +9252,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1
@@ -9434,10 +9270,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -9446,7 +9281,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_min_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -9463,10 +9298,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -9475,7 +9309,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_min_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
index 3c991cf..afd0f01 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
@@ -782,69 +782,90 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; SDAG-GFX942-LABEL: memcpy_known_medium:
; SDAG-GFX942: ; %bb.0:
; SDAG-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; SDAG-GFX942-NEXT: s_load_dword s13, s[4:5], 0x34
+; SDAG-GFX942-NEXT: s_load_dword s17, s[4:5], 0x34
; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x44
-; SDAG-GFX942-NEXT: s_load_dword s14, s[4:5], 0x54
-; SDAG-GFX942-NEXT: s_mov_b32 s12, 0
-; SDAG-GFX942-NEXT: s_mov_b32 s5, s12
-; SDAG-GFX942-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-GFX942-NEXT: s_load_dword s12, s[4:5], 0x54
+; SDAG-GFX942-NEXT: s_mov_b32 s16, 0
+; SDAG-GFX942-NEXT: s_mov_b32 s5, s16
; SDAG-GFX942-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-GFX942-NEXT: s_mov_b32 s4, s3
-; SDAG-GFX942-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
-; SDAG-GFX942-NEXT: s_mov_b32 s13, s2
+; SDAG-GFX942-NEXT: s_or_b64 s[6:7], s[4:5], s[16:17]
+; SDAG-GFX942-NEXT: s_mov_b32 s17, s2
; SDAG-GFX942-NEXT: s_mov_b32 s2, s1
-; SDAG-GFX942-NEXT: s_mov_b32 s3, s12
-; SDAG-GFX942-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
-; SDAG-GFX942-NEXT: s_mov_b32 s13, s14
+; SDAG-GFX942-NEXT: s_mov_b32 s3, s16
+; SDAG-GFX942-NEXT: s_or_b64 s[4:5], s[2:3], s[16:17]
+; SDAG-GFX942-NEXT: s_mov_b32 s17, s12
; SDAG-GFX942-NEXT: s_mov_b32 s2, s11
-; SDAG-GFX942-NEXT: s_or_b64 s[14:15], s[2:3], s[12:13]
-; SDAG-GFX942-NEXT: s_mov_b32 s13, s10
+; SDAG-GFX942-NEXT: s_or_b64 s[14:15], s[2:3], s[16:17]
+; SDAG-GFX942-NEXT: s_mov_b32 s17, s10
; SDAG-GFX942-NEXT: s_mov_b32 s2, s9
-; SDAG-GFX942-NEXT: s_or_b64 s[12:13], s[2:3], s[12:13]
+; SDAG-GFX942-NEXT: s_or_b64 s[12:13], s[2:3], s[16:17]
; SDAG-GFX942-NEXT: .LBB1_1: ; %load-store-loop
; SDAG-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
-; SDAG-GFX942-NEXT: v_add_u32_e32 v1, s0, v0
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v1, s[4:7], 0 offen
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v1, s[4:7], 0 offen offset:16
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v1, s[4:7], 0 offen offset:32
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v1, s[4:7], 0 offen offset:48
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v1, s[4:7], 0 offen offset:64
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v1, s[4:7], 0 offen offset:80
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v1, s[4:7], 0 offen offset:96
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v1, s[4:7], 0 offen offset:112
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v1, s[4:7], 0 offen offset:128
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v1, s[4:7], 0 offen offset:144
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v1, s[4:7], 0 offen offset:160
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v1, s[4:7], 0 offen offset:176
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v1, s[4:7], 0 offen offset:192
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v1, s[4:7], 0 offen offset:208
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v1, s[4:7], 0 offen offset:224
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v1, s[4:7], 0 offen offset:240
-; SDAG-GFX942-NEXT: v_add_u32_e32 v62, s8, v0
-; SDAG-GFX942-NEXT: v_add_co_u32_e32 v0, vcc, 0x100, v0
-; SDAG-GFX942-NEXT: s_and_b64 vcc, exec, vcc
-; SDAG-GFX942-NEXT: s_waitcnt vmcnt(0)
-; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v63, a3 ; Reload Reuse
-; SDAG-GFX942-NEXT: scratch_store_dwordx3 off, a[0:2], off ; 12-byte Folded Spill
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[12:15], 0 offen
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v62, s[12:15], 0 offen offset:16
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v62, s[12:15], 0 offen offset:32
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v62, s[12:15], 0 offen offset:48
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v62, s[12:15], 0 offen offset:64
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v62, s[12:15], 0 offen offset:80
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v62, s[12:15], 0 offen offset:96
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v62, s[12:15], 0 offen offset:112
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v62, s[12:15], 0 offen offset:128
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v62, s[12:15], 0 offen offset:144
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v62, s[12:15], 0 offen offset:160
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v62, s[12:15], 0 offen offset:176
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v62, s[12:15], 0 offen offset:192
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v62, s[12:15], 0 offen offset:208
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v62, s[12:15], 0 offen offset:224
-; SDAG-GFX942-NEXT: scratch_load_dwordx3 v[2:4], off, off ; 12-byte Folded Reload
+; SDAG-GFX942-NEXT: s_add_i32 s1, s0, s16
+; SDAG-GFX942-NEXT: v_mov_b32_e32 v60, s1
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[8:11], v60, s[4:7], 0 offen
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[4:7], v60, s[4:7], 0 offen offset:16
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[12:15], v60, s[4:7], 0 offen offset:32
+; SDAG-GFX942-NEXT: s_add_i32 s2, s8, s16
+; SDAG-GFX942-NEXT: v_mov_b32_e32 v0, s2
+; SDAG-GFX942-NEXT: s_addk_i32 s16, 0x100
+; SDAG-GFX942-NEXT: s_cmpk_lt_u32 s16, 0x100
; SDAG-GFX942-NEXT: s_waitcnt vmcnt(0)
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[12:15], 0 offen offset:240
-; SDAG-GFX942-NEXT: s_cbranch_vccnz .LBB1_1
+; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a0, v15 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a1, v14 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a2, v13 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a3, v12 ; Reload Reuse
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[12:15], v60, s[4:7], 0 offen offset:48
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[16:19], v60, s[4:7], 0 offen offset:64
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[20:23], v60, s[4:7], 0 offen offset:80
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[24:27], v60, s[4:7], 0 offen offset:96
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[28:31], v60, s[4:7], 0 offen offset:112
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[32:35], v60, s[4:7], 0 offen offset:128
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[36:39], v60, s[4:7], 0 offen offset:144
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[40:43], v60, s[4:7], 0 offen offset:160
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[44:47], v60, s[4:7], 0 offen offset:176
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[48:51], v60, s[4:7], 0 offen offset:192
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[52:55], v60, s[4:7], 0 offen offset:208
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[56:59], v60, s[4:7], 0 offen offset:224
+; SDAG-GFX942-NEXT: s_nop 0
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[60:63], v60, s[4:7], 0 offen offset:240
+; SDAG-GFX942-NEXT: s_nop 0
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[8:11], v0, s[12:15], 0 offen
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[4:7], v0, s[12:15], 0 offen offset:16
+; SDAG-GFX942-NEXT: s_nop 1
+; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v5, a0 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v4, a1 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v3, a2 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v2, a3 ; Reload Reuse
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v0, s[12:15], 0 offen offset:32
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[12:15], v0, s[12:15], 0 offen offset:48
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[16:19], v0, s[12:15], 0 offen offset:64
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[20:23], v0, s[12:15], 0 offen offset:80
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[24:27], v0, s[12:15], 0 offen offset:96
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[28:31], v0, s[12:15], 0 offen offset:112
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[32:35], v0, s[12:15], 0 offen offset:128
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[36:39], v0, s[12:15], 0 offen offset:144
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[40:43], v0, s[12:15], 0 offen offset:160
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[44:47], v0, s[12:15], 0 offen offset:176
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[48:51], v0, s[12:15], 0 offen offset:192
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[52:55], v0, s[12:15], 0 offen offset:208
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[56:59], v0, s[12:15], 0 offen offset:224
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[60:63], v0, s[12:15], 0 offen offset:240
+; SDAG-GFX942-NEXT: s_cbranch_scc1 .LBB1_1
; SDAG-GFX942-NEXT: ; %bb.2: ; %memcpy-split
; SDAG-GFX942-NEXT: s_endpgm
;
@@ -852,84 +873,87 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; SDAG-GFX1100: ; %bb.0:
; SDAG-GFX1100-NEXT: s_clause 0x3
; SDAG-GFX1100-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; SDAG-GFX1100-NEXT: s_load_b32 s13, s[4:5], 0x34
+; SDAG-GFX1100-NEXT: s_load_b32 s17, s[4:5], 0x34
; SDAG-GFX1100-NEXT: s_load_b128 s[8:11], s[4:5], 0x44
; SDAG-GFX1100-NEXT: s_load_b32 s18, s[4:5], 0x54
-; SDAG-GFX1100-NEXT: s_mov_b32 s12, 0
-; SDAG-GFX1100-NEXT: v_mov_b32_e32 v0, 0
-; SDAG-GFX1100-NEXT: s_mov_b32 s5, s12
-; SDAG-GFX1100-NEXT: s_mov_b32 s15, s12
-; SDAG-GFX1100-NEXT: s_mov_b32 s17, s12
+; SDAG-GFX1100-NEXT: s_mov_b32 s16, 0
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; SDAG-GFX1100-NEXT: s_mov_b32 s5, s16
+; SDAG-GFX1100-NEXT: s_mov_b32 s13, s16
+; SDAG-GFX1100-NEXT: s_mov_b32 s15, s16
; SDAG-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-GFX1100-NEXT: s_mov_b32 s4, s3
-; SDAG-GFX1100-NEXT: s_mov_b32 s14, s1
-; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
-; SDAG-GFX1100-NEXT: s_mov_b32 s13, s2
-; SDAG-GFX1100-NEXT: s_mov_b32 s16, s11
-; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[14:15], s[12:13]
-; SDAG-GFX1100-NEXT: s_mov_b32 s13, s18
+; SDAG-GFX1100-NEXT: s_mov_b32 s12, s1
+; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[16:17]
+; SDAG-GFX1100-NEXT: s_mov_b32 s17, s2
+; SDAG-GFX1100-NEXT: s_mov_b32 s14, s11
+; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[12:13], s[16:17]
+; SDAG-GFX1100-NEXT: s_mov_b32 s17, s18
; SDAG-GFX1100-NEXT: s_mov_b32 s2, s9
-; SDAG-GFX1100-NEXT: s_or_b64 s[14:15], s[16:17], s[12:13]
-; SDAG-GFX1100-NEXT: s_mov_b32 s13, s10
-; SDAG-GFX1100-NEXT: s_mov_b32 s3, s12
+; SDAG-GFX1100-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17]
+; SDAG-GFX1100-NEXT: s_mov_b32 s17, s10
+; SDAG-GFX1100-NEXT: s_mov_b32 s3, s16
; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; SDAG-GFX1100-NEXT: s_or_b64 s[12:13], s[2:3], s[12:13]
+; SDAG-GFX1100-NEXT: s_or_b64 s[12:13], s[2:3], s[16:17]
; SDAG-GFX1100-NEXT: .LBB1_1: ; %load-store-loop
; SDAG-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1
-; SDAG-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0
-; SDAG-GFX1100-NEXT: v_add_nc_u32_e32 v65, s8, v0
-; SDAG-GFX1100-NEXT: v_add_co_u32 v0, s1, 0x100, v0
-; SDAG-GFX1100-NEXT: s_and_b32 vcc_lo, exec_lo, s1
+; SDAG-GFX1100-NEXT: s_add_i32 s1, s0, s16
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; SDAG-GFX1100-NEXT: v_mov_b32_e32 v60, s1
+; SDAG-GFX1100-NEXT: s_add_i32 s1, s8, s16
+; SDAG-GFX1100-NEXT: s_addk_i32 s16, 0x100
+; SDAG-GFX1100-NEXT: v_mov_b32_e32 v64, s1
+; SDAG-GFX1100-NEXT: s_cmpk_lt_u32 s16, 0x100
; SDAG-GFX1100-NEXT: s_clause 0xf
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[1:4], v61, s[4:7], 0 offen
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[5:8], v61, s[4:7], 0 offen offset:16
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[9:12], v61, s[4:7], 0 offen offset:32
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[13:16], v61, s[4:7], 0 offen offset:48
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[17:20], v61, s[4:7], 0 offen offset:64
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[21:24], v61, s[4:7], 0 offen offset:80
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[25:28], v61, s[4:7], 0 offen offset:96
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[29:32], v61, s[4:7], 0 offen offset:112
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[33:36], v61, s[4:7], 0 offen offset:128
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[37:40], v61, s[4:7], 0 offen offset:144
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[41:44], v61, s[4:7], 0 offen offset:160
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[45:48], v61, s[4:7], 0 offen offset:176
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[49:52], v61, s[4:7], 0 offen offset:192
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[53:56], v61, s[4:7], 0 offen offset:208
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[57:60], v61, s[4:7], 0 offen offset:224
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[61:64], v61, s[4:7], 0 offen offset:240
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[0:3], v60, s[4:7], 0 offen
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[4:7], v60, s[4:7], 0 offen offset:16
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[8:11], v60, s[4:7], 0 offen offset:32
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[12:15], v60, s[4:7], 0 offen offset:48
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[16:19], v60, s[4:7], 0 offen offset:64
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[20:23], v60, s[4:7], 0 offen offset:80
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[24:27], v60, s[4:7], 0 offen offset:96
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[28:31], v60, s[4:7], 0 offen offset:112
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[32:35], v60, s[4:7], 0 offen offset:128
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[36:39], v60, s[4:7], 0 offen offset:144
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[40:43], v60, s[4:7], 0 offen offset:160
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[44:47], v60, s[4:7], 0 offen offset:176
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[48:51], v60, s[4:7], 0 offen offset:192
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[52:55], v60, s[4:7], 0 offen offset:208
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[56:59], v60, s[4:7], 0 offen offset:224
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[60:63], v60, s[4:7], 0 offen offset:240
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(15)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[1:4], v65, s[12:15], 0 offen
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[0:3], v64, s[12:15], 0 offen
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(14)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[5:8], v65, s[12:15], 0 offen offset:16
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[4:7], v64, s[12:15], 0 offen offset:16
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(13)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[9:12], v65, s[12:15], 0 offen offset:32
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[8:11], v64, s[12:15], 0 offen offset:32
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(12)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[13:16], v65, s[12:15], 0 offen offset:48
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[12:15], v64, s[12:15], 0 offen offset:48
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(11)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[17:20], v65, s[12:15], 0 offen offset:64
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[16:19], v64, s[12:15], 0 offen offset:64
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(10)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[21:24], v65, s[12:15], 0 offen offset:80
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[20:23], v64, s[12:15], 0 offen offset:80
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(9)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[25:28], v65, s[12:15], 0 offen offset:96
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[24:27], v64, s[12:15], 0 offen offset:96
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(8)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[29:32], v65, s[12:15], 0 offen offset:112
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[28:31], v64, s[12:15], 0 offen offset:112
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(7)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[33:36], v65, s[12:15], 0 offen offset:128
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[32:35], v64, s[12:15], 0 offen offset:128
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(6)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[37:40], v65, s[12:15], 0 offen offset:144
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[36:39], v64, s[12:15], 0 offen offset:144
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(5)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[41:44], v65, s[12:15], 0 offen offset:160
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[40:43], v64, s[12:15], 0 offen offset:160
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(4)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[45:48], v65, s[12:15], 0 offen offset:176
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[44:47], v64, s[12:15], 0 offen offset:176
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(3)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[49:52], v65, s[12:15], 0 offen offset:192
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[48:51], v64, s[12:15], 0 offen offset:192
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(2)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[53:56], v65, s[12:15], 0 offen offset:208
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[52:55], v64, s[12:15], 0 offen offset:208
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(1)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[57:60], v65, s[12:15], 0 offen offset:224
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[56:59], v64, s[12:15], 0 offen offset:224
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[61:64], v65, s[12:15], 0 offen offset:240
-; SDAG-GFX1100-NEXT: s_cbranch_vccnz .LBB1_1
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[60:63], v64, s[12:15], 0 offen offset:240
+; SDAG-GFX1100-NEXT: s_cbranch_scc1 .LBB1_1
; SDAG-GFX1100-NEXT: ; %bb.2: ; %memcpy-split
; SDAG-GFX1100-NEXT: s_endpgm
;
@@ -957,52 +981,50 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX942-NEXT: s_mov_b32 s2, s7
; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-GFX942-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3]
-; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, s16
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x100
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v1, s16
; GISEL-GFX942-NEXT: .LBB1_1: ; %load-store-loop
; GISEL-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
-; GISEL-GFX942-NEXT: v_add_u32_e32 v1, s0, v0
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v1, s[8:11], 0 offen
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v1, s[8:11], 0 offen offset:16
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v1, s[8:11], 0 offen offset:32
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v1, s[8:11], 0 offen offset:48
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v1, s[8:11], 0 offen offset:64
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v1, s[8:11], 0 offen offset:80
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v1, s[8:11], 0 offen offset:96
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v1, s[8:11], 0 offen offset:112
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v1, s[8:11], 0 offen offset:128
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v1, s[8:11], 0 offen offset:144
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v1, s[8:11], 0 offen offset:160
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v1, s[8:11], 0 offen offset:176
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v1, s[8:11], 0 offen offset:192
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v1, s[8:11], 0 offen offset:208
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v1, s[8:11], 0 offen offset:224
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v1, s[8:11], 0 offen offset:240
-; GISEL-GFX942-NEXT: v_add_u32_e32 v62, s12, v0
-; GISEL-GFX942-NEXT: v_add_co_u32_e32 v0, vcc, 0x100, v0
-; GISEL-GFX942-NEXT: s_xor_b64 s[2:3], vcc, -1
-; GISEL-GFX942-NEXT: s_xor_b64 s[2:3], s[2:3], -1
-; GISEL-GFX942-NEXT: s_and_b64 vcc, s[2:3], exec
+; GISEL-GFX942-NEXT: v_add_u32_e32 v62, s0, v1
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v62, s[8:11], 0 offen
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v62, s[8:11], 0 offen offset:16
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v62, s[8:11], 0 offen offset:32
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v62, s[8:11], 0 offen offset:48
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v62, s[8:11], 0 offen offset:64
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v62, s[8:11], 0 offen offset:80
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v62, s[8:11], 0 offen offset:96
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v62, s[8:11], 0 offen offset:112
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v62, s[8:11], 0 offen offset:128
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v62, s[8:11], 0 offen offset:144
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v62, s[8:11], 0 offen offset:160
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v62, s[8:11], 0 offen offset:176
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v62, s[8:11], 0 offen offset:192
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v62, s[8:11], 0 offen offset:208
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v62, s[8:11], 0 offen offset:224
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v62, s[8:11], 0 offen offset:240
+; GISEL-GFX942-NEXT: v_add_u32_e32 v63, s12, v1
+; GISEL-GFX942-NEXT: v_add_u32_e32 v1, 0x100, v1
+; GISEL-GFX942-NEXT: v_cmp_lt_u32_e32 vcc, v1, v0
; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
-; GISEL-GFX942-NEXT: v_accvgpr_read_b32 v63, a3 ; Reload Reuse
-; GISEL-GFX942-NEXT: scratch_store_dwordx3 off, a[0:2], off ; 12-byte Folded Spill
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[4:7], 0 offen
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v62, s[4:7], 0 offen offset:16
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v62, s[4:7], 0 offen offset:32
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v62, s[4:7], 0 offen offset:48
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v62, s[4:7], 0 offen offset:64
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v62, s[4:7], 0 offen offset:80
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v62, s[4:7], 0 offen offset:96
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v62, s[4:7], 0 offen offset:112
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v62, s[4:7], 0 offen offset:128
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v62, s[4:7], 0 offen offset:144
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v62, s[4:7], 0 offen offset:160
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v62, s[4:7], 0 offen offset:176
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v62, s[4:7], 0 offen offset:192
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v62, s[4:7], 0 offen offset:208
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v62, s[4:7], 0 offen offset:224
-; GISEL-GFX942-NEXT: scratch_load_dwordx3 v[2:4], off, off ; 12-byte Folded Reload
+; GISEL-GFX942-NEXT: scratch_store_dwordx4 off, a[0:3], off ; 16-byte Folded Spill
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v63, s[4:7], 0 offen offset:16
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v63, s[4:7], 0 offen offset:32
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v63, s[4:7], 0 offen offset:48
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v63, s[4:7], 0 offen offset:64
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v63, s[4:7], 0 offen offset:80
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v63, s[4:7], 0 offen offset:96
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v63, s[4:7], 0 offen offset:112
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v63, s[4:7], 0 offen offset:128
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v63, s[4:7], 0 offen offset:144
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v63, s[4:7], 0 offen offset:160
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v63, s[4:7], 0 offen offset:176
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v63, s[4:7], 0 offen offset:192
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v63, s[4:7], 0 offen offset:208
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v63, s[4:7], 0 offen offset:224
+; GISEL-GFX942-NEXT: scratch_load_dwordx4 v[2:5], off, off ; 16-byte Folded Reload
; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[4:7], 0 offen offset:240
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen offset:240
; GISEL-GFX942-NEXT: s_cbranch_vccnz .LBB1_1
; GISEL-GFX942-NEXT: ; %bb.2: ; %memcpy-split
; GISEL-GFX942-NEXT: s_endpgm
@@ -1037,8 +1059,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1
; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0
; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v65, s8, v0
-; GISEL-GFX1100-NEXT: v_add_co_u32 v0, s1, 0x100, v0
-; GISEL-GFX1100-NEXT: s_xor_b32 s1, s1, -1
+; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v0, 0x100, v0
; GISEL-GFX1100-NEXT: s_clause 0xf
; GISEL-GFX1100-NEXT: buffer_load_b128 v[1:4], v61, s[4:7], 0 offen
; GISEL-GFX1100-NEXT: buffer_load_b128 v[5:8], v61, s[4:7], 0 offen offset:16
@@ -1056,7 +1077,6 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX1100-NEXT: buffer_load_b128 v[53:56], v61, s[4:7], 0 offen offset:208
; GISEL-GFX1100-NEXT: buffer_load_b128 v[57:60], v61, s[4:7], 0 offen offset:224
; GISEL-GFX1100-NEXT: buffer_load_b128 v[61:64], v61, s[4:7], 0 offen offset:240
-; GISEL-GFX1100-NEXT: s_xor_b32 s1, s1, -1
; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX1100-NEXT: buffer_store_b128 v[1:4], v65, s[12:15], 0 offen
; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(14)
@@ -1089,7 +1109,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX1100-NEXT: buffer_store_b128 v[57:60], v65, s[12:15], 0 offen offset:224
; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0)
; GISEL-GFX1100-NEXT: buffer_store_b128 v[61:64], v65, s[12:15], 0 offen offset:240
-; GISEL-GFX1100-NEXT: s_and_b32 vcc_lo, exec_lo, s1
+; GISEL-GFX1100-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x100, v0
; GISEL-GFX1100-NEXT: s_cbranch_vccnz .LBB1_1
; GISEL-GFX1100-NEXT: ; %bb.2: ; %memcpy-split
; GISEL-GFX1100-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
index 029aa39..ce1ea4d 100644
--- a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
@@ -128,13 +128,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY2]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY2]]
; CHECK-NEXT: SI_RETURN
%0:vreg_64 = COPY $vgpr0_vgpr1
%1:vreg_64 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0
%2.sub2_sub3:areg_128 = COPY %1
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -153,13 +153,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY2]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY2]]
; CHECK-NEXT: SI_RETURN
%0:vreg_64 = COPY $vgpr0_vgpr1
%1:vreg_64 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0
%2.sub2_sub3:areg_128_align2 = COPY %1
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -398,14 +398,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vgpr_32 = COPY $vgpr0
undef %1.sub0:areg_128 = COPY %0
%1.sub1:areg_128 = COPY %0
%1.sub2:areg_128 = COPY %0
%1.sub3:areg_128 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %1
SI_RETURN
...
@@ -425,14 +425,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128_align2 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vgpr_32 = COPY $vgpr0
undef %1.sub0:areg_128_align2 = COPY %0
%1.sub1:areg_128_align2 = COPY %0
%1.sub2:areg_128_align2 = COPY %0
%1.sub3:areg_128_align2 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %1
SI_RETURN
...
@@ -641,13 +641,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128 =COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -668,13 +668,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY]].sub1
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_128 =COPY $vgpr0_vgpr1
%0.sub1:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0.sub0
%2.sub2_sub3:areg_128_align2 = COPY %0.sub1
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -890,14 +890,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128 = COPY [[COPY]].sub0
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_64 = COPY $vgpr0
undef %1.sub0:areg_128 = COPY %0.sub0
%1.sub1:areg_128 = COPY %0.sub0
%1.sub2:areg_128 = COPY %0.sub0
%1.sub3:areg_128 = COPY %0.sub0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %1
SI_RETURN
...
@@ -917,14 +917,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128_align2 = COPY [[COPY]].sub0
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_64 = COPY $vgpr0
undef %1.sub0:areg_128_align2 = COPY %0.sub0
%1.sub1:areg_128_align2 = COPY %0.sub0
%1.sub2:areg_128_align2 = COPY %0.sub0
%1.sub3:areg_128_align2 = COPY %0.sub0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %1
SI_RETURN
...
@@ -1051,13 +1051,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128 = COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -1076,13 +1076,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128_align2 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128_align2 = COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128_align2 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128_align2 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -1358,11 +1358,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: [[COPY1:%[0-9]+]]:areg_128 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%2:areg_128 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -1379,11 +1379,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: [[COPY1:%[0-9]+]]:areg_128_align2 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vreg_128_align2 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%2:areg_128_align2 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
diff --git a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
index 5134159..0fc54ae 100644
--- a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
@@ -619,43 +619,43 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: s_mov_b64 s[8:9], 0
; GISEL-NEXT: v_ashrrev_i32_e32 v18, 31, v7
; GISEL-NEXT: v_ashrrev_i32_e32 v19, 31, v15
-; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f
-; GISEL-NEXT: v_mov_b32_e32 v11, 0
+; GISEL-NEXT: v_mov_b32_e32 v16, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v17, 0
; GISEL-NEXT: v_xor_b32_e32 v0, v18, v4
; GISEL-NEXT: v_xor_b32_e32 v1, v18, v5
; GISEL-NEXT: v_xor_b32_e32 v2, v18, v6
; GISEL-NEXT: v_xor_b32_e32 v3, v18, v7
; GISEL-NEXT: v_xor_b32_e32 v4, v19, v12
; GISEL-NEXT: v_xor_b32_e32 v5, v19, v13
-; GISEL-NEXT: v_xor_b32_e32 v14, v19, v14
-; GISEL-NEXT: v_xor_b32_e32 v15, v19, v15
+; GISEL-NEXT: v_xor_b32_e32 v12, v19, v14
+; GISEL-NEXT: v_xor_b32_e32 v13, v19, v15
; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v0, v18
; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v1, v18, vcc
; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], v4, v19
; GISEL-NEXT: v_subb_u32_e64 v21, s[4:5], v5, v19, s[4:5]
-; GISEL-NEXT: v_subb_u32_e32 v12, vcc, v2, v18, vcc
-; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v18, vcc
-; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v19, s[4:5]
-; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v19, vcc
-; GISEL-NEXT: v_ffbh_u32_e32 v14, v21
-; GISEL-NEXT: v_ffbh_u32_e32 v15, v20
-; GISEL-NEXT: v_ffbh_u32_e32 v16, v7
-; GISEL-NEXT: v_ffbh_u32_e32 v17, v6
+; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v2, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v18, vcc
+; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v12, v19, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v13, v19, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v12, v21
+; GISEL-NEXT: v_ffbh_u32_e32 v13, v20
+; GISEL-NEXT: v_ffbh_u32_e32 v14, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v15, v6
; GISEL-NEXT: v_or_b32_e32 v0, v20, v4
; GISEL-NEXT: v_or_b32_e32 v1, v21, v5
-; GISEL-NEXT: v_or_b32_e32 v2, v6, v12
-; GISEL-NEXT: v_or_b32_e32 v3, v7, v13
-; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15
+; GISEL-NEXT: v_or_b32_e32 v2, v6, v10
+; GISEL-NEXT: v_or_b32_e32 v3, v7, v11
+; GISEL-NEXT: v_add_i32_e32 v13, vcc, 32, v13
; GISEL-NEXT: v_ffbh_u32_e32 v26, v5
; GISEL-NEXT: v_ffbh_u32_e32 v27, v4
-; GISEL-NEXT: v_add_i32_e32 v17, vcc, 32, v17
-; GISEL-NEXT: v_ffbh_u32_e32 v28, v13
-; GISEL-NEXT: v_ffbh_u32_e32 v29, v12
+; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v10
; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
-; GISEL-NEXT: v_min_u32_e32 v0, v14, v15
+; GISEL-NEXT: v_min_u32_e32 v0, v12, v13
; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v27
-; GISEL-NEXT: v_min_u32_e32 v2, v16, v17
+; GISEL-NEXT: v_min_u32_e32 v2, v14, v15
; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v29
; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
; GISEL-NEXT: v_min_u32_e32 v1, v26, v1
@@ -665,32 +665,32 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, s[4:5]
; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v0, v1
; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, vcc
; GISEL-NEXT: v_subb_u32_e64 v0, s[4:5], 0, 0, s[4:5]
; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
-; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[10:11]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[16:17]
; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; GISEL-NEXT: v_xor_b32_e32 v10, 0x7f, v2
+; GISEL-NEXT: v_xor_b32_e32 v12, 0x7f, v2
; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[0:1]
; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v0
-; GISEL-NEXT: v_or_b32_e32 v11, v3, v1
+; GISEL-NEXT: v_or_b32_e32 v12, v12, v0
+; GISEL-NEXT: v_or_b32_e32 v13, v3, v1
; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GISEL-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc
-; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_or_b32_e32 v11, v14, v15
-; GISEL-NEXT: v_and_b32_e32 v14, 1, v11
-; GISEL-NEXT: v_or_b32_e32 v10, v11, v10
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13]
+; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v13, v14, v15
+; GISEL-NEXT: v_and_b32_e32 v14, 1, v13
+; GISEL-NEXT: v_or_b32_e32 v12, v13, v12
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GISEL-NEXT: v_cndmask_b32_e64 v14, v6, 0, vcc
-; GISEL-NEXT: v_and_b32_e32 v16, 1, v10
+; GISEL-NEXT: v_and_b32_e32 v16, 1, v12
; GISEL-NEXT: v_cndmask_b32_e64 v15, v7, 0, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v10, v12, 0, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v11, v13, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v12, v10, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v11, 0, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
@@ -703,22 +703,22 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v0, s[4:5]
; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v1, vcc
; GISEL-NEXT: v_add_i32_e64 v14, s[4:5], v30, v2
-; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v30
+; GISEL-NEXT: v_sub_i32_e64 v12, s[4:5], 64, v30
; GISEL-NEXT: v_lshl_b64 v[0:1], v[6:7], v30
-; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], v30
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], v30
; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
-; GISEL-NEXT: v_lshr_b64 v[10:11], v[6:7], v10
+; GISEL-NEXT: v_lshr_b64 v[12:13], v[6:7], v12
; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v14
; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30
; GISEL-NEXT: v_cndmask_b32_e32 v14, 0, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v15, 0, v1, vcc
-; GISEL-NEXT: v_or_b32_e32 v0, v10, v2
-; GISEL-NEXT: v_or_b32_e32 v1, v11, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v12, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v13, v3
; GISEL-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
-; GISEL-NEXT: v_cndmask_b32_e32 v10, v0, v12, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v11, v1, v13, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v0, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v1, v11, vcc
; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
; GISEL-NEXT: v_mov_b32_e32 v0, s8
; GISEL-NEXT: v_mov_b32_e32 v1, s9
@@ -730,26 +730,26 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
; GISEL-NEXT: v_add_i32_e32 v32, vcc, 0xffffffc0, v26
; GISEL-NEXT: v_sub_i32_e32 v16, vcc, 64, v26
-; GISEL-NEXT: v_lshr_b64 v[0:1], v[12:13], v26
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[10:11], v26
; GISEL-NEXT: v_lshr_b64 v[2:3], v[6:7], v26
; GISEL-NEXT: s_mov_b64 s[4:5], 0
; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v20
; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v21, vcc
-; GISEL-NEXT: v_lshl_b64 v[16:17], v[12:13], v16
-; GISEL-NEXT: v_lshr_b64 v[12:13], v[12:13], v32
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[10:11], v16
+; GISEL-NEXT: v_lshr_b64 v[10:11], v[10:11], v32
; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v4, vcc
; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v5, vcc
; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
; GISEL-NEXT: v_or_b32_e32 v2, v2, v16
; GISEL-NEXT: v_or_b32_e32 v3, v3, v17
; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v12, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v1, vcc
; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
-; GISEL-NEXT: v_cndmask_b32_e32 v12, v2, v6, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v13, v3, v7, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v2, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v3, v7, vcc
; GISEL-NEXT: v_mov_b32_e32 v7, 0
; GISEL-NEXT: v_mov_b32_e32 v0, s4
; GISEL-NEXT: v_mov_b32_e32 v1, s5
@@ -757,20 +757,20 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_mov_b32_e32 v3, s7
; GISEL-NEXT: .LBB0_9: ; %udiv-do-while
; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], 1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], 1
; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
-; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v13
-; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v11
-; GISEL-NEXT: v_lshl_b64 v[12:13], v[14:15], 1
-; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v11
+; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v13
+; GISEL-NEXT: v_lshl_b64 v[10:11], v[14:15], 1
+; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v15
; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26
; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
; GISEL-NEXT: v_or_b32_e32 v16, v16, v6
; GISEL-NEXT: v_or_b32_e32 v2, v2, v34
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v14
-; GISEL-NEXT: v_or_b32_e32 v14, v0, v12
-; GISEL-NEXT: v_or_b32_e32 v15, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v12, v12, v14
+; GISEL-NEXT: v_or_b32_e32 v14, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v15, v1, v11
; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v2
@@ -783,14 +783,14 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v6
; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GISEL-NEXT: v_and_b32_e32 v6, 1, v0
-; GISEL-NEXT: v_and_b32_e32 v12, v0, v20
-; GISEL-NEXT: v_and_b32_e32 v13, v0, v21
+; GISEL-NEXT: v_and_b32_e32 v10, v0, v20
+; GISEL-NEXT: v_and_b32_e32 v11, v0, v21
; GISEL-NEXT: v_and_b32_e32 v34, v0, v4
; GISEL-NEXT: v_and_b32_e32 v35, v0, v5
; GISEL-NEXT: v_mov_b32_e32 v0, v6
; GISEL-NEXT: v_mov_b32_e32 v1, v7
-; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v2, v12
-; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v13, vcc
+; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v2, v10
+; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v11, vcc
; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v34, vcc
; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc
; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
@@ -800,9 +800,9 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: .LBB0_11: ; %Flow11
; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1
-; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v15
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v4
+; GISEL-NEXT: v_or_b32_e32 v12, v12, v4
; GISEL-NEXT: v_or_b32_e32 v14, v0, v2
; GISEL-NEXT: v_or_b32_e32 v15, v1, v3
; GISEL-NEXT: .LBB0_12: ; %Flow12
@@ -815,8 +815,8 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_xor_b32_e32 v6, v9, v3
; GISEL-NEXT: v_xor_b32_e32 v4, v14, v7
; GISEL-NEXT: v_xor_b32_e32 v5, v15, v7
-; GISEL-NEXT: v_xor_b32_e32 v8, v10, v7
-; GISEL-NEXT: v_xor_b32_e32 v9, v11, v7
+; GISEL-NEXT: v_xor_b32_e32 v8, v12, v7
+; GISEL-NEXT: v_xor_b32_e32 v9, v13, v7
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v7
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index e7af746..e042157 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -20,7 +20,8 @@ define i128 @fptosi_f64_to_i128(double %x) {
; SDAG-NEXT: s_cbranch_execz .LBB0_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
@@ -386,7 +387,8 @@ define i128 @fptoui_f64_to_i128(double %x) {
; SDAG-NEXT: s_cbranch_execz .LBB1_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
@@ -749,9 +751,10 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB2_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT: v_mov_b32_e32 v6, 0
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1100,9 +1103,10 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB3_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT: v_mov_b32_e32 v6, 0
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1489,9 +1493,10 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB6_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT: v_mov_b32_e32 v6, 0
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1836,9 +1841,10 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB7_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT: v_mov_b32_e32 v6, 0
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
index 05403f0..a50791e 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
@@ -7575,15 +7575,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB38_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
; GFX7-NEXT: v_mov_b32_e32 v0, v8
; GFX7-NEXT: v_mov_b32_e32 v1, v9
@@ -7593,7 +7591,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB38_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7609,15 +7609,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB38_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, v8
@@ -7628,7 +7626,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v11, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB38_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7809,15 +7809,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB39_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
; GFX7-NEXT: v_mov_b32_e32 v0, v8
; GFX7-NEXT: v_mov_b32_e32 v1, v9
@@ -7827,7 +7825,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB39_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7843,15 +7843,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB39_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, v8
@@ -7862,7 +7860,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v11, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB39_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8039,34 +8039,32 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: s_movk_i32 s4, 0xf800
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
; GFX7-NEXT: s_mov_b32 s5, -1
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s6, 0
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX7-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
-; GFX7-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX7-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
; GFX7-NEXT: .LBB40_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
-; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v8
-; GFX7-NEXT: v_mov_b32_e32 v1, v9
-; GFX7-NEXT: v_mov_b32_e32 v2, v10
-; GFX7-NEXT: v_mov_b32_e32 v3, v11
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB40_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8077,35 +8075,33 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: s_movk_i32 s4, 0xf800
-; GFX6-NEXT: v_mov_b32_e32 v7, v1
-; GFX6-NEXT: v_mov_b32_e32 v6, v0
; GFX6-NEXT: s_mov_b32 s5, -1
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, 0
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX6-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX6-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
-; GFX6-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX6-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
; GFX6-NEXT: .LBB40_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
-; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v8
-; GFX6-NEXT: v_mov_b32_e32 v1, v9
-; GFX6-NEXT: v_mov_b32_e32 v2, v10
-; GFX6-NEXT: v_mov_b32_e32 v3, v11
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB40_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
index ac223fd..311faac 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
@@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3]
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX7-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v8
-; GFX7-NEXT: v_mov_b32_e32 v1, v9
-; GFX7-NEXT: v_mov_b32_e32 v2, v10
-; GFX7-NEXT: v_mov_b32_e32 v3, v11
+; GFX7-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB24_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX6-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX6-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3]
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX6-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7]
-; GFX6-NEXT: v_mov_b32_e32 v0, v8
-; GFX6-NEXT: v_mov_b32_e32 v1, v9
-; GFX6-NEXT: v_mov_b32_e32 v2, v10
-; GFX6-NEXT: v_mov_b32_e32 v3, v11
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB24_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
index 5653f85..e2808ee 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
@@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3]
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX7-NEXT: v_min_f64 v[8:9], v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v8
-; GFX7-NEXT: v_mov_b32_e32 v1, v9
-; GFX7-NEXT: v_mov_b32_e32 v2, v10
-; GFX7-NEXT: v_mov_b32_e32 v3, v11
+; GFX7-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX7-NEXT: v_min_f64 v[6:7], v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB24_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX6-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX6-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3]
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX6-NEXT: v_min_f64 v[8:9], v[0:1], v[6:7]
-; GFX6-NEXT: v_mov_b32_e32 v0, v8
-; GFX6-NEXT: v_mov_b32_e32 v1, v9
-; GFX6-NEXT: v_mov_b32_e32 v2, v10
-; GFX6-NEXT: v_mov_b32_e32 v3, v11
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX6-NEXT: v_min_f64 v[6:7], v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB24_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
index f0e1615..11f0f38 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
@@ -3913,15 +3913,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
; GFX7-NEXT: v_mov_b32_e32 v0, v8
; GFX7-NEXT: v_mov_b32_e32 v1, v9
@@ -3931,7 +3929,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3947,15 +3947,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, v8
@@ -3966,7 +3964,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v11, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4165,15 +4165,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
; GFX7-NEXT: v_mov_b32_e32 v0, v8
; GFX7-NEXT: v_mov_b32_e32 v1, v9
@@ -4183,7 +4181,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4199,15 +4199,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, v8
@@ -4218,7 +4216,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v11, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4413,34 +4413,32 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1)
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: s_movk_i32 s4, 0xf800
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
; GFX7-NEXT: s_mov_b32 s5, -1
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s6, 0
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX7-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
-; GFX7-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX7-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
; GFX7-NEXT: .LBB18_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
-; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v8
-; GFX7-NEXT: v_mov_b32_e32 v1, v9
-; GFX7-NEXT: v_mov_b32_e32 v2, v10
-; GFX7-NEXT: v_mov_b32_e32 v3, v11
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB18_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4451,35 +4449,33 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1)
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: s_movk_i32 s4, 0xf800
-; GFX6-NEXT: v_mov_b32_e32 v7, v1
-; GFX6-NEXT: v_mov_b32_e32 v6, v0
; GFX6-NEXT: s_mov_b32 s5, -1
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, 0
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX6-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX6-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
-; GFX6-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX6-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
; GFX6-NEXT: .LBB18_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
-; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v8
-; GFX6-NEXT: v_mov_b32_e32 v1, v9
-; GFX6-NEXT: v_mov_b32_e32 v2, v10
-; GFX6-NEXT: v_mov_b32_e32 v3, v11
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB18_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
index 74f0f64..6a4c284 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -1502,13 +1502,11 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB32_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7
; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
@@ -1521,6 +1519,8 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB32_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1593,13 +1593,11 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB33_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7
; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
@@ -1612,6 +1610,8 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB33_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1883,43 +1883,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_scalar(ptr addrspace(1) inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v9, s6, 0
-; SI-NEXT: v_writelane_b32 v9, s7, 1
+; SI-NEXT: v_writelane_b32 v7, s6, 0
+; SI-NEXT: v_writelane_b32 v7, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
+; SI-NEXT: v_mov_b32_e32 v6, s35
; SI-NEXT: .LBB36_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v8, v1
-; SI-NEXT: v_mov_b32_e32 v7, v0
-; SI-NEXT: v_subrev_i32_e32 v5, vcc, s34, v7
-; SI-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v5
-; SI-NEXT: v_mov_b32_e32 v1, v6
-; SI-NEXT: v_mov_b32_e32 v2, v7
-; SI-NEXT: v_mov_b32_e32 v3, v8
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_subrev_i32_e32 v2, vcc, s34, v4
+; SI-NEXT: v_subb_u32_e32 v3, vcc, v5, v6, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB36_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v9, 1
-; SI-NEXT: v_readlane_b32 s6, v9, 0
+; SI-NEXT: v_readlane_b32 s7, v7, 1
+; SI-NEXT: v_readlane_b32 s6, v7, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1985,43 +1984,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v9, s6, 0
-; SI-NEXT: v_writelane_b32 v9, s7, 1
+; SI-NEXT: v_writelane_b32 v7, s6, 0
+; SI-NEXT: v_writelane_b32 v7, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
+; SI-NEXT: v_mov_b32_e32 v6, s35
; SI-NEXT: .LBB37_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v8, v1
-; SI-NEXT: v_mov_b32_e32 v7, v0
-; SI-NEXT: v_subrev_i32_e32 v5, vcc, s34, v7
-; SI-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v5
-; SI-NEXT: v_mov_b32_e32 v1, v6
-; SI-NEXT: v_mov_b32_e32 v2, v7
-; SI-NEXT: v_mov_b32_e32 v3, v8
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_subrev_i32_e32 v2, vcc, s34, v4
+; SI-NEXT: v_subb_u32_e32 v3, vcc, v5, v6, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB37_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v9, 1
-; SI-NEXT: v_readlane_b32 s6, v9, 0
+; SI-NEXT: v_readlane_b32 s7, v7, 1
+; SI-NEXT: v_readlane_b32 s6, v7, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -2342,13 +2340,11 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB42_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_and_b32_e32 v9, v11, v6
; SI-NEXT: v_and_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -2361,6 +2357,8 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB42_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2433,13 +2431,11 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB43_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_and_b32_e32 v9, v11, v6
; SI-NEXT: v_and_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -2452,6 +2448,8 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB43_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2726,14 +2724,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg %
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB46_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v3, s34, v5
; SI-NEXT: v_and_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -2745,6 +2740,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg %
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB46_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2825,14 +2822,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB47_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v3, s34, v5
; SI-NEXT: v_and_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -2844,6 +2838,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB47_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3182,14 +3178,11 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB52_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, v11, v6
; SI-NEXT: v_and_b32_e32 v1, v10, v7
; SI-NEXT: v_not_b32_e32 v9, v0
@@ -3203,6 +3196,8 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB52_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3279,14 +3274,11 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB53_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, v11, v6
; SI-NEXT: v_and_b32_e32 v1, v10, v7
; SI-NEXT: v_not_b32_e32 v9, v0
@@ -3300,6 +3292,8 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB53_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3590,14 +3584,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB56_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v5
; SI-NEXT: v_and_b32_e32 v1, s35, v4
; SI-NEXT: v_not_b32_e32 v3, v0
@@ -3611,6 +3602,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB56_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3695,14 +3688,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB57_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v5
; SI-NEXT: v_and_b32_e32 v1, s35, v4
; SI-NEXT: v_not_b32_e32 v3, v0
@@ -3716,6 +3706,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB57_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3891,14 +3883,11 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB59_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, v11, v6
; SI-NEXT: v_and_b32_e32 v1, v10, v7
; SI-NEXT: v_not_b32_e32 v9, v0
@@ -3912,6 +3901,8 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB59_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4162,13 +4153,11 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB62_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_or_b32_e32 v9, v11, v6
; SI-NEXT: v_or_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -4181,6 +4170,8 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB62_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4253,13 +4244,11 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB63_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_or_b32_e32 v9, v11, v6
; SI-NEXT: v_or_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -4272,6 +4261,8 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB63_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4546,14 +4537,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB66_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_or_b32_e32 v3, s34, v5
; SI-NEXT: v_or_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -4565,6 +4553,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB66_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4645,14 +4635,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB67_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_or_b32_e32 v3, s34, v5
; SI-NEXT: v_or_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -4664,6 +4651,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB67_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4990,13 +4979,11 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB72_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_xor_b32_e32 v9, v11, v6
; SI-NEXT: v_xor_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -5009,6 +4996,8 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB72_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5081,13 +5070,11 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB73_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_xor_b32_e32 v9, v11, v6
; SI-NEXT: v_xor_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -5100,6 +5087,8 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB73_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5374,14 +5363,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg %
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB76_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_xor_b32_e32 v3, s34, v5
; SI-NEXT: v_xor_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -5393,6 +5379,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg %
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB76_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5473,14 +5461,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB77_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_xor_b32_e32 v3, s34, v5
; SI-NEXT: v_xor_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -5492,6 +5477,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB77_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5824,13 +5811,11 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB82_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -5844,6 +5829,8 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB82_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5918,13 +5905,11 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB83_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -5938,6 +5923,8 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB83_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6223,45 +6210,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_scalar(ptr addrspace(1) inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB86_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB86_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -6331,45 +6318,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB87_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB87_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -7176,13 +7163,11 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB96_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -7196,6 +7181,8 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB96_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7270,13 +7257,11 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB97_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -7290,6 +7275,8 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB97_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7575,45 +7562,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_scalar(ptr addrspace(1) inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB100_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB100_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -7683,45 +7670,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB101_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB101_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -8416,13 +8403,11 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB109_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -8436,6 +8421,8 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB109_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8510,13 +8497,11 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB110_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -8530,6 +8515,8 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB110_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8815,45 +8802,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_scalar(ptr addrspace(1) inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB113_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB113_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -8923,45 +8910,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB114_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB114_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -9292,13 +9279,11 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB119_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -9312,6 +9297,8 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB119_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9386,13 +9373,11 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB120_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -9406,6 +9391,8 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB120_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9691,45 +9678,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_scalar(ptr addrspace(1) inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB123_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB123_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -9799,45 +9786,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB124_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB124_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -10645,14 +10632,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB133_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc
; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5]
@@ -10667,6 +10651,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB133_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -10745,14 +10731,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB134_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc
; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5]
@@ -10767,6 +10750,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB134_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -11065,14 +11050,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB137_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5]
@@ -11087,6 +11069,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB137_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -11173,14 +11157,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB138_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5]
@@ -11195,6 +11176,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB138_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -11557,14 +11540,11 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB143_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10
; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc
; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
@@ -11581,6 +11561,8 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB143_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -11663,14 +11645,11 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB144_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10
; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc
; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
@@ -11687,6 +11666,8 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB144_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -12004,49 +11985,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_scalar(ptr addrspace(1) i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[38:39], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB147_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8
-; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9]
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5]
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
-; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[38:39]
; SI-NEXT: s_cbranch_execnz .LBB147_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[38:39]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -12124,49 +12104,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_offset_scalar(ptr addrspa
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[38:39], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB148_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8
-; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9]
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5]
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
-; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[38:39]
; SI-NEXT: s_cbranch_execnz .LBB148_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[38:39]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll b/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll
index 9b4539c..10d61de 100644
--- a/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll
+++ b/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll
@@ -6,96 +6,134 @@ define void @main(i1 %arg) #0 {
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; CHECK-NEXT: buffer_store_dword v5, off, s[0:3], s32 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s32 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
-; CHECK-NEXT: v_writelane_b32 v5, s30, 0
-; CHECK-NEXT: v_writelane_b32 v5, s31, 1
-; CHECK-NEXT: v_writelane_b32 v5, s36, 2
-; CHECK-NEXT: v_writelane_b32 v5, s37, 3
-; CHECK-NEXT: v_writelane_b32 v5, s38, 4
-; CHECK-NEXT: v_writelane_b32 v5, s39, 5
-; CHECK-NEXT: v_writelane_b32 v5, s48, 6
-; CHECK-NEXT: v_writelane_b32 v5, s49, 7
-; CHECK-NEXT: v_writelane_b32 v5, s50, 8
-; CHECK-NEXT: v_writelane_b32 v5, s51, 9
-; CHECK-NEXT: v_writelane_b32 v5, s52, 10
-; CHECK-NEXT: v_writelane_b32 v5, s53, 11
-; CHECK-NEXT: v_writelane_b32 v5, s54, 12
-; CHECK-NEXT: v_writelane_b32 v5, s55, 13
-; CHECK-NEXT: s_getpc_b64 s[24:25]
-; CHECK-NEXT: v_writelane_b32 v5, s64, 14
-; CHECK-NEXT: s_movk_i32 s4, 0xf0
-; CHECK-NEXT: s_mov_b32 s5, s24
-; CHECK-NEXT: v_writelane_b32 v5, s65, 15
-; CHECK-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x0
-; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: v_writelane_b32 v5, s66, 16
-; CHECK-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
-; CHECK-NEXT: v_writelane_b32 v5, s67, 17
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_movk_i32 s6, 0x130
-; CHECK-NEXT: s_mov_b32 s7, s24
-; CHECK-NEXT: v_writelane_b32 v5, s68, 18
-; CHECK-NEXT: s_load_dwordx16 s[36:51], s[6:7], 0x0
-; CHECK-NEXT: v_writelane_b32 v5, s69, 19
-; CHECK-NEXT: v_writelane_b32 v5, s70, 20
+; CHECK-NEXT: v_writelane_b32 v6, s30, 0
+; CHECK-NEXT: v_writelane_b32 v6, s31, 1
+; CHECK-NEXT: v_writelane_b32 v6, s36, 2
+; CHECK-NEXT: v_writelane_b32 v6, s37, 3
+; CHECK-NEXT: v_writelane_b32 v6, s38, 4
+; CHECK-NEXT: v_writelane_b32 v6, s39, 5
+; CHECK-NEXT: v_writelane_b32 v6, s48, 6
+; CHECK-NEXT: v_writelane_b32 v6, s49, 7
+; CHECK-NEXT: v_writelane_b32 v6, s50, 8
+; CHECK-NEXT: v_writelane_b32 v6, s51, 9
+; CHECK-NEXT: v_writelane_b32 v6, s52, 10
+; CHECK-NEXT: v_writelane_b32 v6, s53, 11
+; CHECK-NEXT: v_writelane_b32 v6, s54, 12
+; CHECK-NEXT: v_writelane_b32 v6, s55, 13
+; CHECK-NEXT: v_writelane_b32 v6, s64, 14
+; CHECK-NEXT: v_writelane_b32 v6, s65, 15
+; CHECK-NEXT: v_writelane_b32 v6, s66, 16
+; CHECK-NEXT: v_writelane_b32 v6, s67, 17
+; CHECK-NEXT: v_writelane_b32 v6, s68, 18
+; CHECK-NEXT: s_getpc_b64 s[4:5]
+; CHECK-NEXT: s_mov_b64 s[8:9], 0
+; CHECK-NEXT: v_writelane_b32 v6, s69, 19
; CHECK-NEXT: s_mov_b32 s68, 0
-; CHECK-NEXT: v_writelane_b32 v5, s71, 21
+; CHECK-NEXT: s_mov_b32 s69, s4
+; CHECK-NEXT: s_load_dwordx4 s[4:7], s[8:9], 0x0
+; CHECK-NEXT: s_load_dwordx8 s[24:31], s[68:69], 0x30
+; CHECK-NEXT: s_load_dwordx16 s[52:67], s[68:69], 0xf0
+; CHECK-NEXT: ; kill: killed $sgpr8_sgpr9
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_load_dwordx16 s[8:23], s[68:69], 0x130
+; CHECK-NEXT: ; implicit-def: $vgpr7 : SGPR spill to VGPR lane
+; CHECK-NEXT: v_writelane_b32 v6, s70, 20
+; CHECK-NEXT: v_writelane_b32 v6, s71, 21
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s4
; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_writelane_b32 v7, s8, 0
+; CHECK-NEXT: v_writelane_b32 v7, s9, 1
+; CHECK-NEXT: v_writelane_b32 v7, s10, 2
+; CHECK-NEXT: v_writelane_b32 v7, s11, 3
+; CHECK-NEXT: v_writelane_b32 v7, s12, 4
+; CHECK-NEXT: v_writelane_b32 v7, s13, 5
+; CHECK-NEXT: v_writelane_b32 v7, s14, 6
+; CHECK-NEXT: v_writelane_b32 v7, s15, 7
+; CHECK-NEXT: v_writelane_b32 v7, s16, 8
+; CHECK-NEXT: v_writelane_b32 v7, s17, 9
+; CHECK-NEXT: v_writelane_b32 v7, s18, 10
+; CHECK-NEXT: v_writelane_b32 v7, s19, 11
+; CHECK-NEXT: v_writelane_b32 v7, s20, 12
+; CHECK-NEXT: v_writelane_b32 v7, s21, 13
+; CHECK-NEXT: v_writelane_b32 v7, s22, 14
+; CHECK-NEXT: v_writelane_b32 v7, s23, 15
+; CHECK-NEXT: v_writelane_b32 v7, s52, 16
+; CHECK-NEXT: v_writelane_b32 v7, s53, 17
+; CHECK-NEXT: v_writelane_b32 v7, s54, 18
+; CHECK-NEXT: v_writelane_b32 v7, s55, 19
+; CHECK-NEXT: v_writelane_b32 v7, s56, 20
+; CHECK-NEXT: v_writelane_b32 v7, s57, 21
+; CHECK-NEXT: v_writelane_b32 v7, s58, 22
+; CHECK-NEXT: v_writelane_b32 v7, s59, 23
+; CHECK-NEXT: v_writelane_b32 v7, s60, 24
+; CHECK-NEXT: v_writelane_b32 v7, s61, 25
+; CHECK-NEXT: v_writelane_b32 v7, s62, 26
+; CHECK-NEXT: v_writelane_b32 v7, s63, 27
+; CHECK-NEXT: v_writelane_b32 v7, s64, 28
+; CHECK-NEXT: v_writelane_b32 v7, s65, 29
+; CHECK-NEXT: v_writelane_b32 v7, s66, 30
+; CHECK-NEXT: s_load_dwordx16 s[8:23], s[68:69], 0x1f0
+; CHECK-NEXT: s_load_dwordx16 s[36:51], s[68:69], 0x2f0
; CHECK-NEXT: s_mov_b32 s69, s68
; CHECK-NEXT: s_mov_b32 s70, s68
; CHECK-NEXT: s_mov_b32 s71, s68
-; CHECK-NEXT: image_sample_lz v3, v[1:2], s[16:23], s[68:71] dmask:0x1
+; CHECK-NEXT: v_writelane_b32 v7, s67, 31
+; CHECK-NEXT: image_sample_lz v3, v[1:2], s[60:67], s[68:71] dmask:0x1
+; CHECK-NEXT: v_readlane_b32 s52, v7, 0
; CHECK-NEXT: v_mov_b32_e32 v1, v2
-; CHECK-NEXT: ; implicit-def: $vgpr6 : SGPR spill to VGPR lane
-; CHECK-NEXT: s_mov_b32 s6, 48
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_writelane_b32 v6, s36, 0
-; CHECK-NEXT: v_writelane_b32 v6, s37, 1
-; CHECK-NEXT: v_writelane_b32 v6, s38, 2
-; CHECK-NEXT: v_writelane_b32 v6, s39, 3
-; CHECK-NEXT: v_writelane_b32 v6, s40, 4
-; CHECK-NEXT: v_writelane_b32 v6, s41, 5
-; CHECK-NEXT: image_sample_lz v4, v[1:2], s[36:43], s[68:71] dmask:0x1
-; CHECK-NEXT: v_writelane_b32 v6, s42, 6
-; CHECK-NEXT: v_writelane_b32 v6, s43, 7
-; CHECK-NEXT: v_writelane_b32 v6, s44, 8
-; CHECK-NEXT: v_writelane_b32 v6, s45, 9
-; CHECK-NEXT: v_writelane_b32 v6, s46, 10
-; CHECK-NEXT: v_writelane_b32 v6, s47, 11
-; CHECK-NEXT: v_writelane_b32 v6, s48, 12
-; CHECK-NEXT: v_writelane_b32 v6, s49, 13
-; CHECK-NEXT: v_writelane_b32 v6, s50, 14
-; CHECK-NEXT: s_movk_i32 s56, 0x1f0
-; CHECK-NEXT: s_movk_i32 s72, 0x2f0
-; CHECK-NEXT: s_mov_b32 s57, s24
-; CHECK-NEXT: s_mov_b32 s73, s24
-; CHECK-NEXT: v_writelane_b32 v6, s51, 15
-; CHECK-NEXT: s_load_dwordx8 s[24:31], s[6:7], 0x0
-; CHECK-NEXT: s_load_dwordx16 s[36:51], s[56:57], 0x0
-; CHECK-NEXT: v_and_b32_e32 v0, 1, v0
-; CHECK-NEXT: s_load_dwordx16 s[52:67], s[72:73], 0x0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s[4:5], 1, v0
+; CHECK-NEXT: v_readlane_b32 s53, v7, 1
+; CHECK-NEXT: v_readlane_b32 s54, v7, 2
+; CHECK-NEXT: v_readlane_b32 s55, v7, 3
+; CHECK-NEXT: v_readlane_b32 s56, v7, 4
+; CHECK-NEXT: v_readlane_b32 s57, v7, 5
+; CHECK-NEXT: v_readlane_b32 s58, v7, 6
+; CHECK-NEXT: v_readlane_b32 s59, v7, 7
+; CHECK-NEXT: v_and_b32_e32 v5, 1, v0
+; CHECK-NEXT: v_cmp_ne_u32_e64 s[4:5], 1, v5
+; CHECK-NEXT: v_readlane_b32 s60, v7, 8
+; CHECK-NEXT: v_readlane_b32 s61, v7, 9
+; CHECK-NEXT: v_readlane_b32 s62, v7, 10
+; CHECK-NEXT: image_sample_lz v4, v[1:2], s[52:59], s[68:71] dmask:0x1
+; CHECK-NEXT: v_readlane_b32 s63, v7, 11
+; CHECK-NEXT: v_readlane_b32 s64, v7, 12
+; CHECK-NEXT: v_readlane_b32 s65, v7, 13
+; CHECK-NEXT: v_readlane_b32 s66, v7, 14
+; CHECK-NEXT: v_readlane_b32 s67, v7, 15
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_mul_f32_e32 v0, v4, v3
; CHECK-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; CHECK-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
; CHECK-NEXT: s_cbranch_execz .LBB0_3
; CHECK-NEXT: ; %bb.1: ; %bb48
-; CHECK-NEXT: image_sample_lz v3, v[1:2], s[16:23], s[68:71] dmask:0x1
-; CHECK-NEXT: v_mov_b32_e32 v1, v2
+; CHECK-NEXT: v_readlane_b32 s52, v7, 16
+; CHECK-NEXT: v_readlane_b32 s60, v7, 24
+; CHECK-NEXT: v_readlane_b32 s61, v7, 25
+; CHECK-NEXT: v_readlane_b32 s62, v7, 26
+; CHECK-NEXT: v_readlane_b32 s63, v7, 27
+; CHECK-NEXT: v_readlane_b32 s64, v7, 28
+; CHECK-NEXT: v_readlane_b32 s65, v7, 29
+; CHECK-NEXT: v_readlane_b32 s66, v7, 30
+; CHECK-NEXT: v_readlane_b32 s67, v7, 31
; CHECK-NEXT: s_and_b64 vcc, exec, -1
+; CHECK-NEXT: v_readlane_b32 s53, v7, 17
+; CHECK-NEXT: v_readlane_b32 s54, v7, 18
+; CHECK-NEXT: v_readlane_b32 s55, v7, 19
+; CHECK-NEXT: v_readlane_b32 s56, v7, 20
+; CHECK-NEXT: image_sample_lz v3, v[1:2], s[60:67], s[68:71] dmask:0x1
+; CHECK-NEXT: v_mov_b32_e32 v1, v2
+; CHECK-NEXT: v_readlane_b32 s57, v7, 21
+; CHECK-NEXT: v_readlane_b32 s58, v7, 22
+; CHECK-NEXT: v_readlane_b32 s59, v7, 23
; CHECK-NEXT: .LBB0_2: ; %bb50
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_mov_b32 s69, s68
-; CHECK-NEXT: s_mov_b32 s70, s68
-; CHECK-NEXT: s_mov_b32 s71, s68
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: image_sample_lz v4, v[1:2], s[44:51], s[28:31] dmask:0x1
+; CHECK-NEXT: image_sample_lz v4, v[1:2], s[16:23], s[28:31] dmask:0x1
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: image_sample_lz v1, v[1:2], s[60:67], s[68:71] dmask:0x1
+; CHECK-NEXT: image_sample_lz v1, v[1:2], s[44:51], s[68:71] dmask:0x1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_sub_f32_e32 v1, v1, v4
; CHECK-NEXT: v_mul_f32_e32 v1, v1, v0
@@ -103,60 +141,75 @@ define void @main(i1 %arg) #0 {
; CHECK-NEXT: s_mov_b64 vcc, vcc
; CHECK-NEXT: s_cbranch_vccnz .LBB0_2
; CHECK-NEXT: .LBB0_3: ; %Flow14
-; CHECK-NEXT: s_andn2_saveexec_b64 s[20:21], s[6:7]
+; CHECK-NEXT: s_andn2_saveexec_b64 s[6:7], s[6:7]
; CHECK-NEXT: s_cbranch_execz .LBB0_10
; CHECK-NEXT: ; %bb.4: ; %bb32
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_and_saveexec_b64 s[16:17], s[4:5]
-; CHECK-NEXT: s_xor_b64 s[22:23], exec, s[16:17]
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[16:17]
; CHECK-NEXT: s_cbranch_execz .LBB0_6
; CHECK-NEXT: ; %bb.5: ; %bb43
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s44, 0
-; CHECK-NEXT: s_mov_b32 s45, s44
-; CHECK-NEXT: v_mov_b32_e32 v2, s44
-; CHECK-NEXT: v_mov_b32_e32 v3, s45
-; CHECK-NEXT: s_mov_b32 s46, s44
-; CHECK-NEXT: s_mov_b32 s47, s44
-; CHECK-NEXT: image_sample_lz v1, v[2:3], s[8:15], s[44:47] dmask:0x1
-; CHECK-NEXT: v_readlane_b32 s4, v6, 0
-; CHECK-NEXT: v_readlane_b32 s12, v6, 8
-; CHECK-NEXT: v_readlane_b32 s13, v6, 9
-; CHECK-NEXT: v_readlane_b32 s14, v6, 10
-; CHECK-NEXT: v_readlane_b32 s15, v6, 11
-; CHECK-NEXT: v_readlane_b32 s16, v6, 12
-; CHECK-NEXT: v_readlane_b32 s17, v6, 13
-; CHECK-NEXT: v_readlane_b32 s18, v6, 14
-; CHECK-NEXT: v_readlane_b32 s19, v6, 15
-; CHECK-NEXT: v_readlane_b32 s5, v6, 1
-; CHECK-NEXT: v_readlane_b32 s6, v6, 2
-; CHECK-NEXT: v_readlane_b32 s7, v6, 3
-; CHECK-NEXT: v_readlane_b32 s8, v6, 4
-; CHECK-NEXT: v_readlane_b32 s9, v6, 5
-; CHECK-NEXT: image_sample_lz v0, v[2:3], s[12:19], s[24:27] dmask:0x1
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v3, v2
-; CHECK-NEXT: v_readlane_b32 s10, v6, 6
-; CHECK-NEXT: v_readlane_b32 s11, v6, 7
+; CHECK-NEXT: s_mov_b32 s16, 0
+; CHECK-NEXT: s_mov_b32 s17, s16
+; CHECK-NEXT: v_mov_b32_e32 v0, s16
+; CHECK-NEXT: v_readlane_b32 s44, v7, 16
+; CHECK-NEXT: v_mov_b32_e32 v1, s17
+; CHECK-NEXT: s_mov_b32 s18, s16
+; CHECK-NEXT: s_mov_b32 s19, s16
+; CHECK-NEXT: v_readlane_b32 s45, v7, 17
+; CHECK-NEXT: v_readlane_b32 s46, v7, 18
+; CHECK-NEXT: v_readlane_b32 s47, v7, 19
+; CHECK-NEXT: v_readlane_b32 s48, v7, 20
+; CHECK-NEXT: v_readlane_b32 s49, v7, 21
+; CHECK-NEXT: v_readlane_b32 s50, v7, 22
+; CHECK-NEXT: v_readlane_b32 s51, v7, 23
+; CHECK-NEXT: v_readlane_b32 s52, v7, 24
+; CHECK-NEXT: v_readlane_b32 s53, v7, 25
+; CHECK-NEXT: v_readlane_b32 s54, v7, 26
+; CHECK-NEXT: v_readlane_b32 s55, v7, 27
+; CHECK-NEXT: v_readlane_b32 s56, v7, 28
+; CHECK-NEXT: v_readlane_b32 s57, v7, 29
+; CHECK-NEXT: v_readlane_b32 s58, v7, 30
+; CHECK-NEXT: v_readlane_b32 s59, v7, 31
+; CHECK-NEXT: image_sample_lz v2, v[0:1], s[44:51], s[16:19] dmask:0x1
+; CHECK-NEXT: v_readlane_b32 s44, v7, 0
+; CHECK-NEXT: v_readlane_b32 s52, v7, 8
+; CHECK-NEXT: v_readlane_b32 s53, v7, 9
+; CHECK-NEXT: v_readlane_b32 s54, v7, 10
+; CHECK-NEXT: v_readlane_b32 s55, v7, 11
+; CHECK-NEXT: v_readlane_b32 s56, v7, 12
+; CHECK-NEXT: v_readlane_b32 s57, v7, 13
+; CHECK-NEXT: v_readlane_b32 s58, v7, 14
+; CHECK-NEXT: v_readlane_b32 s59, v7, 15
+; CHECK-NEXT: v_mov_b32_e32 v3, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, v3
+; CHECK-NEXT: v_readlane_b32 s45, v7, 1
+; CHECK-NEXT: v_readlane_b32 s46, v7, 2
+; CHECK-NEXT: v_readlane_b32 s47, v7, 3
+; CHECK-NEXT: image_sample_lz v0, v[0:1], s[52:59], s[24:27] dmask:0x1
+; CHECK-NEXT: v_readlane_b32 s48, v7, 4
+; CHECK-NEXT: v_readlane_b32 s49, v7, 5
+; CHECK-NEXT: v_readlane_b32 s50, v7, 6
+; CHECK-NEXT: v_readlane_b32 s51, v7, 7
; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: buffer_store_dwordx3 v[1:3], off, s[44:47], 0
+; CHECK-NEXT: buffer_store_dwordx3 v[2:4], off, s[16:19], 0
; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: buffer_store_dwordx4 v[0:3], off, s[44:47], 0
+; CHECK-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; CHECK-NEXT: ; implicit-def: $vgpr0
; CHECK-NEXT: .LBB0_6: ; %Flow12
-; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[22:23]
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; CHECK-NEXT: s_cbranch_execz .LBB0_9
; CHECK-NEXT: ; %bb.7: ; %bb33.preheader
-; CHECK-NEXT: s_mov_b32 s8, 0
-; CHECK-NEXT: s_mov_b32 s12, s8
-; CHECK-NEXT: s_mov_b32 s13, s8
-; CHECK-NEXT: v_mov_b32_e32 v1, s12
-; CHECK-NEXT: s_mov_b32 s9, s8
-; CHECK-NEXT: s_mov_b32 s10, s8
-; CHECK-NEXT: s_mov_b32 s11, s8
-; CHECK-NEXT: v_mov_b32_e32 v2, s13
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: image_sample_lz v3, v[1:2], s[36:43], s[8:11] dmask:0x1
-; CHECK-NEXT: image_sample_lz v4, v[1:2], s[52:59], s[8:11] dmask:0x1
+; CHECK-NEXT: s_mov_b32 s16, 0
+; CHECK-NEXT: s_mov_b32 s20, s16
+; CHECK-NEXT: s_mov_b32 s21, s16
+; CHECK-NEXT: v_mov_b32_e32 v1, s20
+; CHECK-NEXT: s_mov_b32 s17, s16
+; CHECK-NEXT: s_mov_b32 s18, s16
+; CHECK-NEXT: s_mov_b32 s19, s16
+; CHECK-NEXT: v_mov_b32_e32 v2, s21
+; CHECK-NEXT: image_sample_lz v3, v[1:2], s[8:15], s[16:19] dmask:0x1
+; CHECK-NEXT: image_sample_lz v4, v[1:2], s[36:43], s[16:19] dmask:0x1
; CHECK-NEXT: s_and_b64 vcc, exec, 0
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_sub_f32_e32 v1, v4, v3
@@ -171,33 +224,33 @@ define void @main(i1 %arg) #0 {
; CHECK-NEXT: .LBB0_9: ; %Flow13
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: .LBB0_10: ; %UnifiedReturnBlock
-; CHECK-NEXT: s_or_b64 exec, exec, s[20:21]
-; CHECK-NEXT: v_readlane_b32 s71, v5, 21
-; CHECK-NEXT: v_readlane_b32 s70, v5, 20
-; CHECK-NEXT: v_readlane_b32 s69, v5, 19
-; CHECK-NEXT: v_readlane_b32 s68, v5, 18
+; CHECK-NEXT: s_or_b64 exec, exec, s[6:7]
+; CHECK-NEXT: v_readlane_b32 s71, v6, 21
+; CHECK-NEXT: v_readlane_b32 s70, v6, 20
+; CHECK-NEXT: v_readlane_b32 s69, v6, 19
+; CHECK-NEXT: v_readlane_b32 s68, v6, 18
+; CHECK-NEXT: v_readlane_b32 s67, v6, 17
+; CHECK-NEXT: v_readlane_b32 s66, v6, 16
+; CHECK-NEXT: v_readlane_b32 s65, v6, 15
+; CHECK-NEXT: v_readlane_b32 s64, v6, 14
+; CHECK-NEXT: v_readlane_b32 s55, v6, 13
+; CHECK-NEXT: v_readlane_b32 s54, v6, 12
+; CHECK-NEXT: v_readlane_b32 s53, v6, 11
+; CHECK-NEXT: v_readlane_b32 s52, v6, 10
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_readlane_b32 s67, v5, 17
-; CHECK-NEXT: v_readlane_b32 s66, v5, 16
-; CHECK-NEXT: v_readlane_b32 s65, v5, 15
-; CHECK-NEXT: v_readlane_b32 s64, v5, 14
-; CHECK-NEXT: v_readlane_b32 s55, v5, 13
-; CHECK-NEXT: v_readlane_b32 s54, v5, 12
-; CHECK-NEXT: v_readlane_b32 s53, v5, 11
-; CHECK-NEXT: v_readlane_b32 s52, v5, 10
-; CHECK-NEXT: v_readlane_b32 s51, v5, 9
-; CHECK-NEXT: v_readlane_b32 s50, v5, 8
-; CHECK-NEXT: v_readlane_b32 s49, v5, 7
-; CHECK-NEXT: v_readlane_b32 s48, v5, 6
-; CHECK-NEXT: v_readlane_b32 s39, v5, 5
-; CHECK-NEXT: v_readlane_b32 s38, v5, 4
-; CHECK-NEXT: v_readlane_b32 s37, v5, 3
-; CHECK-NEXT: v_readlane_b32 s36, v5, 2
-; CHECK-NEXT: v_readlane_b32 s31, v5, 1
-; CHECK-NEXT: v_readlane_b32 s30, v5, 0
+; CHECK-NEXT: v_readlane_b32 s51, v6, 9
+; CHECK-NEXT: v_readlane_b32 s50, v6, 8
+; CHECK-NEXT: v_readlane_b32 s49, v6, 7
+; CHECK-NEXT: v_readlane_b32 s48, v6, 6
+; CHECK-NEXT: v_readlane_b32 s39, v6, 5
+; CHECK-NEXT: v_readlane_b32 s38, v6, 4
+; CHECK-NEXT: v_readlane_b32 s37, v6, 3
+; CHECK-NEXT: v_readlane_b32 s36, v6, 2
+; CHECK-NEXT: v_readlane_b32 s31, v6, 1
+; CHECK-NEXT: v_readlane_b32 s30, v6, 0
; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll b/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
index 59dfd71..bd11b07 100644
--- a/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
+++ b/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
@@ -11,8 +11,8 @@ define protected amdgpu_kernel void @InferNothing(i32 %a, ptr %b, double %c) {
; CHECK-NEXT: v_mov_b32_e32 v2, s2
; CHECK-NEXT: v_mov_b32_e32 v3, s3
; CHECK-NEXT: s_lshl_b64 s[2:3], s[6:7], 3
-; CHECK-NEXT: s_add_u32 s0, s2, s0
-; CHECK-NEXT: s_addc_u32 s1, s3, s1
+; CHECK-NEXT: s_add_u32 s0, s0, s2
+; CHECK-NEXT: s_addc_u32 s1, s1, s3
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: v_add_co_u32_e64 v0, vcc, -8, s0
; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v1, vcc
@@ -69,13 +69,13 @@ define protected amdgpu_kernel void @InferMixed(i32 %a, ptr addrspace(1) %b, dou
; CHECK-NEXT: s_lshl_b64 s[2:3], s[6:7], 3
; CHECK-NEXT: s_add_u32 s0, s0, s2
; CHECK-NEXT: s_addc_u32 s1, s1, s3
+; CHECK-NEXT: s_add_u32 s0, s0, -8
+; CHECK-NEXT: s_addc_u32 s1, s1, -1
; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: buffer_wbinvl1_vol
-; CHECK-NEXT: v_mov_b32_e32 v1, s1
-; CHECK-NEXT: v_add_co_u32_e64 v0, vcc, -7, s0
-; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v1, vcc
-; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
+; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
+; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3] offset:1
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: buffer_wbinvl1_vol
; CHECK-NEXT: s_endpgm
@@ -113,7 +113,7 @@ define protected amdgpu_kernel void @InferPHI(i32 %a, ptr addrspace(1) %b, doubl
; CHECK-NEXT: s_addc_u32 s1, s1, s5
; CHECK-NEXT: s_add_u32 s4, s0, -8
; CHECK-NEXT: s_addc_u32 s5, s1, -1
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 9
+; CHECK-NEXT: s_cmp_eq_u64 s[4:5], 1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
index 92836d8..63db24a 100644
--- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
+++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
@@ -486,7 +486,7 @@ body: |
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK-NEXT: S_ENDPGM 0
bb.0:
S_NOP 0, implicit-def $agpr0
@@ -516,7 +516,7 @@ body: |
S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
- INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2
+ INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2
S_ENDPGM 0
...
@@ -1368,7 +1368,7 @@ body: |
; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
@@ -1408,7 +1408,7 @@ body: |
undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec
early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %4
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %4
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
@@ -1726,7 +1726,7 @@ body: |
; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
@@ -1763,7 +1763,7 @@ body: |
undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
%0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
%4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %4
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %4
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
index 9cbdc38..5b3e486 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
@@ -8,16 +8,16 @@
define amdgpu_kernel void @s_input_output_i128() {
; GFX908-LABEL: name: s_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9633802 /* regdef:SGPR_128 */, def %13
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10682378 /* regdef:SGPR_128 */, def %13
; GFX908-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %13
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9633801 /* reguse:SGPR_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10682377 /* reguse:SGPR_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: s_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9633802 /* regdef:SGPR_128 */, def %11
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10682378 /* regdef:SGPR_128 */, def %11
; GFX90A-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %11
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9633801 /* reguse:SGPR_128 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10682377 /* reguse:SGPR_128 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = tail call i128 asm sideeffect "; def $0", "=s"()
call void asm sideeffect "; use $0", "s"(i128 %val)
@@ -27,16 +27,16 @@ define amdgpu_kernel void @s_input_output_i128() {
define amdgpu_kernel void @v_input_output_i128() {
; GFX908-LABEL: name: v_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def %13
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def %13
; GFX908-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY %13
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7798793 /* reguse:VReg_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7929865 /* reguse:VReg_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: v_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def %11
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def %11
; GFX90A-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY %11
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7995401 /* reguse:VReg_128_Align2 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8257545 /* reguse:VReg_128_Align2 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = tail call i128 asm sideeffect "; def $0", "=v"()
call void asm sideeffect "; use $0", "v"(i128 %val)
@@ -47,16 +47,16 @@ define amdgpu_kernel void @a_input_output_i128() {
; GFX908-LABEL: name: a_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8323082 /* regdef:AReg_128 */, def %13
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8847370 /* regdef:AReg_128 */, def %13
; GFX908-NEXT: [[COPY:%[0-9]+]]:areg_128 = COPY %13
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: a_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8650762 /* regdef:AReg_128_Align2 */, def %11
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9568266 /* regdef:AReg_128_Align2 */, def %11
; GFX90A-NEXT: [[COPY:%[0-9]+]]:areg_128_align2 = COPY %11
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = call i128 asm sideeffect "; def $0", "=a"()
call void asm sideeffect "; use $0", "a"(i128 %val)
diff --git a/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll b/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll
index 48bf7fb..3eef616 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll
@@ -46,8 +46,8 @@ define void @use_extern_normal() #0 {
; CHECK-NEXT: s_ashr_i32 s5, s15, 31
; CHECK-NEXT: v_mov_b32_e32 v0, 0x4048f5c3
; CHECK-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
-; CHECK-NEXT: s_add_u32 s4, s4, s6
-; CHECK-NEXT: s_addc_u32 s5, s5, s7
+; CHECK-NEXT: s_add_u32 s4, s6, s4
+; CHECK-NEXT: s_addc_u32 s5, s7, s5
; CHECK-NEXT: s_load_dword s4, s[4:5], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s4
@@ -70,8 +70,8 @@ define void @use_extern_overalign() #0 {
; CHECK-NEXT: s_ashr_i32 s5, s15, 31
; CHECK-NEXT: v_mov_b32_e32 v0, 0x42280000
; CHECK-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
-; CHECK-NEXT: s_add_u32 s4, s4, s6
-; CHECK-NEXT: s_addc_u32 s5, s5, s7
+; CHECK-NEXT: s_add_u32 s4, s6, s4
+; CHECK-NEXT: s_addc_u32 s5, s7, s5
; CHECK-NEXT: s_load_dword s4, s[4:5], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s4
diff --git a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
index ca77482..fa52b96 100644
--- a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
+++ b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
@@ -1,19 +1,9 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
# RUN: llc -mtriple=amdgcn -run-pass register-coalescer -o - %s | FileCheck %s
-# Check that coalescer does not create wider register tuple than in source
-
-# CHECK: - { id: 2, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 3, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 4, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 5, class: vreg_96, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 6, class: vreg_96, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 7, class: vreg_128, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 8, class: vreg_128, preferred-register: '', flags: [ ] }
+# Check that coalescer does not create wider register tuple than in
+# source.
# No more registers shall be defined
-# CHECK-NEXT: liveins:
-# CHECK: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, %4,
-# CHECK: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, %6,
-
---
name: main
alignment: 1
@@ -52,6 +42,23 @@ body: |
bb.0.entry:
liveins: $sgpr0, $vgpr0_vgpr1
+ ; CHECK-LABEL: name: main
+ ; CHECK: liveins: $sgpr0, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY [[DEF]].sub0
+ ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0:vreg_64 = COPY [[COPY]].sub1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:vreg_64 = COPY [[COPY]].sub0
+ ; CHECK-NEXT: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, [[COPY1]], 0, 0, implicit $exec, implicit $flat_scr
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vreg_96 = IMPLICIT_DEF
+ ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:vreg_96 = COPY [[DEF1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2:vreg_96 = COPY [[DEF]].sub0
+ ; CHECK-NEXT: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vreg_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1_sub2:vreg_128 = COPY [[DEF2]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub3:vreg_128 = COPY [[DEF]].sub0
+ ; CHECK-NEXT: FLAT_STORE_DWORDX4 $vgpr0_vgpr1, [[COPY3]], 0, 0, implicit $exec, implicit $flat_scr
%3 = IMPLICIT_DEF
undef %4.sub0 = COPY $sgpr0
%4.sub1 = COPY %3.sub0
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
index bc3d378..3aa3663 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
@@ -11,9 +11,9 @@
; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
define void @empty() {
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 65d0102..6e52125 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -232,15 +232,15 @@
; GCN-O1-NEXT: AMDGPU Preload Kernel Arguments
; GCN-O1-NEXT: FunctionPass Manager
; GCN-O1-NEXT: AMDGPU Lower Kernel Arguments
+; GCN-O1-NEXT: Dominator Tree Construction
+; GCN-O1-NEXT: Natural Loop Information
+; GCN-O1-NEXT: CodeGen Prepare
; GCN-O1-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O1-NEXT: AMDGPU lower intrinsics
; GCN-O1-NEXT: CallGraph Construction
; GCN-O1-NEXT: Call Graph SCC Pass Manager
; GCN-O1-NEXT: DummyCGSCCPass
; GCN-O1-NEXT: FunctionPass Manager
-; GCN-O1-NEXT: Dominator Tree Construction
-; GCN-O1-NEXT: Natural Loop Information
-; GCN-O1-NEXT: CodeGen Prepare
; GCN-O1-NEXT: Lazy Value Information Analysis
; GCN-O1-NEXT: Lower SwitchInst's to branches
; GCN-O1-NEXT: Lower invoke and unwind, for unwindless code generators
@@ -533,21 +533,21 @@
; GCN-O1-OPTS-NEXT: AMDGPU Preload Kernel Arguments
; GCN-O1-OPTS-NEXT: FunctionPass Manager
; GCN-O1-OPTS-NEXT: AMDGPU Lower Kernel Arguments
+; GCN-O1-OPTS-NEXT: Dominator Tree Construction
+; GCN-O1-OPTS-NEXT: Natural Loop Information
+; GCN-O1-OPTS-NEXT: CodeGen Prepare
+; GCN-O1-OPTS-NEXT: Dominator Tree Construction
+; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl)
+; GCN-O1-OPTS-NEXT: Function Alias Analysis Results
+; GCN-O1-OPTS-NEXT: Natural Loop Information
+; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis
+; GCN-O1-OPTS-NEXT: GPU Load and Store Vectorizer
; GCN-O1-OPTS-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O1-OPTS-NEXT: AMDGPU lower intrinsics
; GCN-O1-OPTS-NEXT: CallGraph Construction
; GCN-O1-OPTS-NEXT: Call Graph SCC Pass Manager
; GCN-O1-OPTS-NEXT: DummyCGSCCPass
; GCN-O1-OPTS-NEXT: FunctionPass Manager
-; GCN-O1-OPTS-NEXT: Dominator Tree Construction
-; GCN-O1-OPTS-NEXT: Natural Loop Information
-; GCN-O1-OPTS-NEXT: CodeGen Prepare
-; GCN-O1-OPTS-NEXT: Dominator Tree Construction
-; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O1-OPTS-NEXT: Function Alias Analysis Results
-; GCN-O1-OPTS-NEXT: Natural Loop Information
-; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis
-; GCN-O1-OPTS-NEXT: GPU Load and Store Vectorizer
; GCN-O1-OPTS-NEXT: Lazy Value Information Analysis
; GCN-O1-OPTS-NEXT: Lower SwitchInst's to branches
; GCN-O1-OPTS-NEXT: Lower invoke and unwind, for unwindless code generators
@@ -852,21 +852,21 @@
; GCN-O2-NEXT: AMDGPU Preload Kernel Arguments
; GCN-O2-NEXT: FunctionPass Manager
; GCN-O2-NEXT: AMDGPU Lower Kernel Arguments
+; GCN-O2-NEXT: Dominator Tree Construction
+; GCN-O2-NEXT: Natural Loop Information
+; GCN-O2-NEXT: CodeGen Prepare
+; GCN-O2-NEXT: Dominator Tree Construction
+; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl)
+; GCN-O2-NEXT: Function Alias Analysis Results
+; GCN-O2-NEXT: Natural Loop Information
+; GCN-O2-NEXT: Scalar Evolution Analysis
+; GCN-O2-NEXT: GPU Load and Store Vectorizer
; GCN-O2-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O2-NEXT: AMDGPU lower intrinsics
; GCN-O2-NEXT: CallGraph Construction
; GCN-O2-NEXT: Call Graph SCC Pass Manager
; GCN-O2-NEXT: DummyCGSCCPass
; GCN-O2-NEXT: FunctionPass Manager
-; GCN-O2-NEXT: Dominator Tree Construction
-; GCN-O2-NEXT: Natural Loop Information
-; GCN-O2-NEXT: CodeGen Prepare
-; GCN-O2-NEXT: Dominator Tree Construction
-; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O2-NEXT: Function Alias Analysis Results
-; GCN-O2-NEXT: Natural Loop Information
-; GCN-O2-NEXT: Scalar Evolution Analysis
-; GCN-O2-NEXT: GPU Load and Store Vectorizer
; GCN-O2-NEXT: Lazy Value Information Analysis
; GCN-O2-NEXT: Lower SwitchInst's to branches
; GCN-O2-NEXT: Lower invoke and unwind, for unwindless code generators
@@ -1186,21 +1186,21 @@
; GCN-O3-NEXT: AMDGPU Preload Kernel Arguments
; GCN-O3-NEXT: FunctionPass Manager
; GCN-O3-NEXT: AMDGPU Lower Kernel Arguments
+; GCN-O3-NEXT: Dominator Tree Construction
+; GCN-O3-NEXT: Natural Loop Information
+; GCN-O3-NEXT: CodeGen Prepare
+; GCN-O3-NEXT: Dominator Tree Construction
+; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl)
+; GCN-O3-NEXT: Function Alias Analysis Results
+; GCN-O3-NEXT: Natural Loop Information
+; GCN-O3-NEXT: Scalar Evolution Analysis
+; GCN-O3-NEXT: GPU Load and Store Vectorizer
; GCN-O3-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O3-NEXT: AMDGPU lower intrinsics
; GCN-O3-NEXT: CallGraph Construction
; GCN-O3-NEXT: Call Graph SCC Pass Manager
; GCN-O3-NEXT: DummyCGSCCPass
; GCN-O3-NEXT: FunctionPass Manager
-; GCN-O3-NEXT: Dominator Tree Construction
-; GCN-O3-NEXT: Natural Loop Information
-; GCN-O3-NEXT: CodeGen Prepare
-; GCN-O3-NEXT: Dominator Tree Construction
-; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O3-NEXT: Function Alias Analysis Results
-; GCN-O3-NEXT: Natural Loop Information
-; GCN-O3-NEXT: Scalar Evolution Analysis
-; GCN-O3-NEXT: GPU Load and Store Vectorizer
; GCN-O3-NEXT: Lazy Value Information Analysis
; GCN-O3-NEXT: Lower SwitchInst's to branches
; GCN-O3-NEXT: Lower invoke and unwind, for unwindless code generators
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
index de7d234..b9bf76c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG %s
declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
declare i64 @llvm.amdgcn.s.quadmask.i64(i64)
@@ -172,3 +172,91 @@ entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
ret i64 %qm
}
+
+;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b32 implicitly defines SCC.
+define amdgpu_kernel void @test_scc_quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
+; GFX11-GISEL-LABEL: test_scc_quadmask_32:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_and_b32 s0, s0, 1
+; GFX11-GISEL-NEXT: s_quadmask_b32 s1, s1
+; GFX11-GISEL-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s0
+; GFX11-GISEL-NEXT: global_store_b32 v2, v3, s[2:3]
+; GFX11-GISEL-NEXT: global_store_b32 v[0:1], v4, off
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: test_scc_quadmask_32:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_and_b32 s0, s0, 1
+; GFX11-SDAG-NEXT: s_quadmask_b32 s1, s1
+; GFX11-SDAG-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
+; GFX11-SDAG-NEXT: global_store_b32 v2, v3, s[2:3]
+; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v4, off
+; GFX11-SDAG-NEXT: s_endpgm
+ %and = and i32 %val0, 1
+ %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone
+ store i32 %result, ptr addrspace(1) %ptr
+ %cmp = icmp eq i32 %and, 0
+ %sel = select i1 %cmp, i32 1, i32 0
+ store i32 %sel, ptr addrspace(1) null, align 4
+ ret void
+}
+
+;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b64 implicitly defines SCC.
+define amdgpu_kernel void @test_scc_quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) {
+; GFX11-GISEL-LABEL: test_scc_quadmask_64:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-GISEL-NEXT: s_load_b32 s4, s[4:5], 0x24
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-GISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-GISEL-NEXT: s_cmp_eq_u32 s4, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v1, s1
+; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, s0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GFX11-GISEL-NEXT: global_store_b64 v4, v[0:1], s[2:3]
+; GFX11-GISEL-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: test_scc_quadmask_64:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_clause 0x1
+; GFX11-SDAG-NEXT: s_load_b32 s6, s[4:5], 0x24
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_and_b32 s4, s6, 1
+; GFX11-SDAG-NEXT: s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-SDAG-NEXT: s_cmp_eq_u32 s4, 0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v5, 0, 1, s0
+; GFX11-SDAG-NEXT: global_store_b64 v4, v[2:3], s[2:3]
+; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v5, off
+; GFX11-SDAG-NEXT: s_endpgm
+ %and = and i32 %val0, 1
+ %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone
+ store i64 %result, ptr addrspace(1) %ptr
+ %cmp = icmp eq i32 %and, 0
+ %sel = select i1 %cmp, i32 1, i32 0
+ store i32 %sel, ptr addrspace(1) null, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
index ba5ce8b..8bb7274 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -76,13 +76,12 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
-; SI-NEXT: s_movk_i32 s4, 0xfc01
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s3, 0xfffff
; SI-NEXT: v_mov_b32_e32 v8, 0x3ff00000
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_bfe_u32 v4, v3, 20, 11
-; SI-NEXT: v_add_i32_e32 v6, vcc, s4, v4
+; SI-NEXT: v_add_i32_e32 v6, vcc, 0xfffffc01, v4
; SI-NEXT: v_lshr_b64 v[4:5], s[2:3], v6
; SI-NEXT: v_and_b32_e32 v7, 0x80000000, v3
; SI-NEXT: v_bfi_b32 v5, v5, 0, v3
diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
index ea9d5e8..1e6b77e 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
@@ -400,9 +400,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
+; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -438,9 +438,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
@@ -531,9 +531,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
+; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -569,9 +569,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll
index 0de7f8f..bd29e9e 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
-; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s | FileCheck %s
; Regression test for issue 160181
; One variable is chosen to be assigned at zero. Here, that's @both
@@ -22,12 +22,20 @@
;.
; CHECK: @llvm.amdgcn.module.lds = internal addrspace(3) global %llvm.amdgcn.module.lds.t poison, align 4, !absolute_symbol [[META0:![0-9]+]]
; CHECK: @llvm.compiler.used = appending addrspace(1) global [1 x ptr] [ptr addrspacecast (ptr addrspace(3) @llvm.amdgcn.module.lds to ptr)], section "llvm.metadata"
+; CHECK: @llvm.amdgcn.kernel.kern_one.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_one.lds.t poison, align 4, !absolute_symbol [[META1:![0-9]+]]
+; CHECK: @llvm.amdgcn.kernel.kern_two.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_two.lds.t poison, align 4, !absolute_symbol [[META1]]
+; CHECK: @llvm.amdgcn.kernel.kern_block_direct_allocation.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_block_direct_allocation.lds.t poison, align 4, !absolute_symbol [[META1]]
+
;.
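;
; Reading aid, not a CHECK line: a function reachable from several kernels now
; locates such a variable through the offset table, in the shape
;   %id   = call i32 @llvm.amdgcn.lds.kernel.id()
;   %slot = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 %id, i32 <column>
;   %off  = load i32, ptr addrspace(4) %slot
;   %ptr  = inttoptr i32 %off to ptr addrspace(3)
; where <column> is a placeholder for the variable's slot (0 for @one, 1 for @two).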
define void @func_one() {
; CHECK-LABEL: define {{[^@]+}}@func_one() {
-; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1:![0-9]+]]
-; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META18:![0-9]+]]
-; CHECK-NEXT: store i16 10, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2:![0-9]+]]
+; CHECK-NEXT: [[ONE:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ONE]], align 4
+; CHECK-NEXT: [[ONE1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
+; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) [[ONE1]], align 4
+; CHECK-NEXT: store i16 10, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11:![0-9]+]]
; CHECK-NEXT: ret void
;
%val0 = load i32, ptr addrspace(3) @both
@@ -38,9 +46,10 @@ define void @func_one() {
define amdgpu_kernel void @kern_one() {
; CHECK-LABEL: define {{[^@]+}}@kern_one
-; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META16:![0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !noalias [[META24:![0-9]+]]
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_one.lds) ]
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !noalias [[META17:![0-9]+]]
; CHECK-NEXT: call void @func_one()
; CHECK-NEXT: ret void
;
@@ -51,9 +60,13 @@ entry:
define void @func_two() {
; CHECK-LABEL: define {{[^@]+}}@func_two() {
-; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1]]
-; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 2), align 4, !noalias [[META25:![0-9]+]]
-; CHECK-NEXT: store i16 20, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2]]
+; CHECK-NEXT: [[TWO:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TWO]], align 4
+; CHECK-NEXT: [[TWO1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
+; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) [[TWO1]], align 4
+; CHECK-NEXT: store i16 20, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11]]
; CHECK-NEXT: ret void
;
%val0 = load i32, ptr addrspace(3) @both
@@ -64,9 +77,10 @@ define void @func_two() {
define amdgpu_kernel void @kern_two() {
; CHECK-LABEL: define {{[^@]+}}@kern_two
-; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-SAME: () #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META18:![0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !alias.scope [[META26:![0-9]+]], !noalias [[META27:![0-9]+]]
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_two.lds) ]
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]]
; CHECK-NEXT: call void @func_two()
; CHECK-NEXT: ret void
;
@@ -82,11 +96,18 @@ entry:
; remains the best candidate for address zero allocation.
define void @func_block_direct_allocation() {
; CHECK-LABEL: define {{[^@]+}}@func_block_direct_allocation() {
-; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META18]]
-; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 2), align 4, !noalias [[META25]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT: [[ONE:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ONE]], align 4
+; CHECK-NEXT: [[ONE1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
+; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr addrspace(3) [[ONE1]], align 4
+; CHECK-NEXT: [[TWO:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TWO]], align 4
+; CHECK-NEXT: [[TWO2:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr addrspace(3) [[TWO2]], align 4
; CHECK-NEXT: [[SUM:%.*]] = add i32 [[VAL1]], [[VAL2]]
-; CHECK-NEXT: store i32 [[SUM]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1]]
-; CHECK-NEXT: store i16 30, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23]]
+; CHECK-NEXT: store i32 [[SUM]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2]]
+; CHECK-NEXT: store i16 30, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11]]
; CHECK-NEXT: ret void
;
%val1 = load i32, ptr addrspace(3) @one
@@ -99,7 +120,8 @@ define void @func_block_direct_allocation() {
define amdgpu_kernel void @kern_block_direct_allocation() {
; CHECK-LABEL: define {{[^@]+}}@kern_block_direct_allocation
-; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-SAME: () #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META21:![0-9]+]] {
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_block_direct_allocation.lds) ], !alias.scope [[META22:![0-9]+]], !noalias [[META25:![0-9]+]]
; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ]
; CHECK-NEXT: call void @func_block_direct_allocation()
; CHECK-NEXT: call void @func_one()
@@ -112,35 +134,8 @@ define amdgpu_kernel void @kern_block_direct_allocation() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-lds-size"="16" }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
-;.
-; CHECK: [[META0]] = !{i32 0, i32 1}
-; CHECK: [[META1]] = !{[[META2:![0-9]+]], [[META4:![0-9]+]], [[META5:![0-9]+]], [[META6:![0-9]+]], [[META8:![0-9]+]], [[META9:![0-9]+]], [[META10:![0-9]+]], [[META12:![0-9]+]], [[META13:![0-9]+]], [[META14:![0-9]+]], [[META16:![0-9]+]], [[META17:![0-9]+]]}
-; CHECK: [[META2]] = distinct !{[[META2]], [[META3:![0-9]+]]}
-; CHECK: [[META3]] = distinct !{[[META3]]}
-; CHECK: [[META4]] = distinct !{[[META4]], [[META3]]}
-; CHECK: [[META5]] = distinct !{[[META5]], [[META3]]}
-; CHECK: [[META6]] = distinct !{[[META6]], [[META7:![0-9]+]]}
-; CHECK: [[META7]] = distinct !{[[META7]]}
-; CHECK: [[META8]] = distinct !{[[META8]], [[META7]]}
-; CHECK: [[META9]] = distinct !{[[META9]], [[META7]]}
-; CHECK: [[META10]] = distinct !{[[META10]], [[META11:![0-9]+]]}
-; CHECK: [[META11]] = distinct !{[[META11]]}
-; CHECK: [[META12]] = distinct !{[[META12]], [[META11]]}
-; CHECK: [[META13]] = distinct !{[[META13]], [[META11]]}
-; CHECK: [[META14]] = distinct !{[[META14]], [[META15:![0-9]+]]}
-; CHECK: [[META15]] = distinct !{[[META15]]}
-; CHECK: [[META16]] = distinct !{[[META16]], [[META15]]}
-; CHECK: [[META17]] = distinct !{[[META17]], [[META15]]}
-; CHECK: [[META18]] = !{[[META19:![0-9]+]], [[META2]], [[META5]], [[META20:![0-9]+]], [[META6]], [[META9]], [[META21:![0-9]+]], [[META10]], [[META13]], [[META22:![0-9]+]], [[META14]], [[META17]]}
-; CHECK: [[META19]] = distinct !{[[META19]], [[META3]]}
-; CHECK: [[META20]] = distinct !{[[META20]], [[META7]]}
-; CHECK: [[META21]] = distinct !{[[META21]], [[META11]]}
-; CHECK: [[META22]] = distinct !{[[META22]], [[META15]]}
-; CHECK: [[META23]] = !{[[META19]], [[META4]], [[META5]], [[META20]], [[META8]], [[META9]], [[META21]], [[META12]], [[META13]], [[META22]], [[META16]], [[META17]]}
-; CHECK: [[META24]] = !{[[META10]], [[META12]], [[META13]], [[META14]], [[META16]], [[META17]]}
-; CHECK: [[META25]] = !{[[META19]], [[META2]], [[META4]], [[META20]], [[META6]], [[META8]], [[META21]], [[META10]], [[META12]], [[META22]], [[META14]], [[META16]]}
-; CHECK: [[META26]] = !{[[META22]]}
-; CHECK: [[META27]] = !{[[META14]], [[META16]], [[META17]]}
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-lds-size"="12" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-lds-size"="16" }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll
index b6f70fa..12212a0 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll
@@ -84,8 +84,8 @@ define void @f2() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+12
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s4
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
index c316f03..b689e1e 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
@@ -49,8 +49,8 @@ define void @f0() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+12
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
@@ -90,8 +90,8 @@ define void @f1() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+8
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+16
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
@@ -131,8 +131,8 @@ define void @f2() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+12
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+20
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s4
@@ -172,8 +172,8 @@ define void @f3() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+16
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+24
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
diff --git a/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
index 65b4d37..93d772f 100644
--- a/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
@@ -13,9 +13,9 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) {
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
; GFX9-NEXT: s_mul_i32 s14, s14, s4
; GFX9-NEXT: s_add_i32 s5, s5, s14
-; GFX9-NEXT: v_add_u32_e32 v0, s5, v0
-; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GFX9-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, s5, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: v_ashrrev_i64 v[4:5], 28, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
@@ -37,12 +37,12 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) {
; GFX10-NEXT: s_load_dword s4, s[8:9], 0x1c
; GFX10-NEXT: s_load_dword s5, s[8:9], 0x38
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_and_b32 s4, s4, 0xffff
; GFX10-NEXT: s_mul_i32 s14, s14, s4
-; GFX10-NEXT: v_add3_u32 v0, s5, s14, v0
-; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GFX10-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX10-NEXT: v_add3_u32 v2, s5, s14, v0
+; GFX10-NEXT: v_ashrrev_i64 v[4:5], 28, v[1:2]
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4
; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v5, vcc_lo
; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4
@@ -62,21 +62,19 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) {
; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x1c
; GFX11-NEXT: s_load_b32 s7, s[4:5], 0x38
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, 0x3ff, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s4, s6, 0xffff
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_mul_i32 s13, s13, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_add3_u32 v0, s7, s13, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GFX11-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX11-NEXT: v_add3_u32 v1, s7, s13, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_ashrrev_i64 v[4:5], 28, v[0:1]
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v5, vcc_lo
; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, s3, v5, vcc_lo
; GFX11-NEXT: global_load_b128 v[0:3], v[0:1], off
; GFX11-NEXT: s_waitcnt vmcnt(0)
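;
; Reading aid: with zero in the low register and the 32-bit index in the high
; register, the pair holds idx * 2^32, so
;   v_ashrrev_i64 v[4:5], 28, v[0:1]   ; = sext(idx) * 16 = (sext(idx) << 4)
; replaces the former v_ashrrev_i32 31 (sign-extend) plus v_lshlrev_b64 4 pair.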
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
index c92c672..ca4f5d2 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -51,7 +51,7 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s4, v2
; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
; CHECK-NEXT: s_add_u32 s4, s4, 1
-; CHECK-NEXT: s_addc_u32 s5, s5, 0
+; CHECK-NEXT: s_addc_u32 s5, 0, s5
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[6:7], v10
; CHECK-NEXT: ; %bb.7:
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
index dd5c247..14b0729 100644
--- a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
@@ -388,8 +388,8 @@ define void @memmove_p0_p3(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align
; CHECK-NEXT: s_and_saveexec_b32 s7, s4
; CHECK-NEXT: s_cbranch_execz .LBB2_13
; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v0
-; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v1, s4
+; CHECK-NEXT: v_add_co_u32 v9, s4, v0, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v1, v4, s4
; CHECK-NEXT: v_add3_u32 v4, v3, v2, -1
; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4
@@ -684,8 +684,8 @@ define void @memmove_p0_p5(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align
; CHECK-NEXT: s_and_saveexec_b32 s7, s4
; CHECK-NEXT: s_cbranch_execz .LBB4_13
; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v0
-; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v1, s4
+; CHECK-NEXT: v_add_co_u32 v9, s4, v0, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v1, v4, s4
; CHECK-NEXT: v_add3_u32 v4, v3, v2, -1
; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4
@@ -1411,8 +1411,8 @@ define void @memmove_p3_p0(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_and_saveexec_b32 s7, s4
; CHECK-NEXT: s_cbranch_execz .LBB10_13
; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v2, s4
+; CHECK-NEXT: v_add_co_u32 v9, s4, v1, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v2, v4, s4
; CHECK-NEXT: v_add3_u32 v4, v3, v0, -1
; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4
@@ -1889,8 +1889,8 @@ define void @memmove_p5_p0(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_and_saveexec_b32 s7, s4
; CHECK-NEXT: s_cbranch_execz .LBB15_13
; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v2, s4
+; CHECK-NEXT: v_add_co_u32 v9, s4, v1, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v2, v4, s4
; CHECK-NEXT: v_add3_u32 v4, v3, v0, -1
; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4
diff --git a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
index 6d0aa1e..7e4be65 100644
--- a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
@@ -9,92 +9,65 @@ define protected amdgpu_kernel void @no_folding_imm_to_inst_with_fi(<4 x i64> %v
; CHECK-NEXT: s_load_b512 s[16:31], s[4:5], 0xe4
; CHECK-NEXT: s_load_b512 s[0:15], s[4:5], 0xa4
; CHECK-NEXT: s_mov_b64 s[34:35], src_private_base
-; CHECK-NEXT: s_movk_i32 s33, 0x70
-; CHECK-NEXT: s_movk_i32 s34, 0x60
-; CHECK-NEXT: s_or_b32 s44, 0x80, s33
-; CHECK-NEXT: s_mov_b32 s45, s35
-; CHECK-NEXT: s_or_b32 s46, 0x80, s34
-; CHECK-NEXT: s_mov_b32 s47, s35
-; CHECK-NEXT: v_dual_mov_b32 v20, s44 :: v_dual_mov_b32 v21, s45
-; CHECK-NEXT: v_dual_mov_b32 v22, s46 :: v_dual_mov_b32 v23, s47
; CHECK-NEXT: s_movk_i32 s34, 0x80
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: v_dual_mov_b32 v34, s34 :: v_dual_mov_b32 v35, s35
+; CHECK-NEXT: v_dual_mov_b32 v20, s34 :: v_dual_mov_b32 v21, s35
; CHECK-NEXT: s_wait_kmcnt 0x0
; CHECK-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v1, s41
; CHECK-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
; CHECK-NEXT: v_dual_mov_b32 v4, s36 :: v_dual_mov_b32 v5, s37
; CHECK-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v7, s39
-; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS
-; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v1, s21
-; CHECK-NEXT: s_movk_i32 s20, 0x50
; CHECK-NEXT: v_dual_mov_b32 v8, s28 :: v_dual_mov_b32 v9, s29
; CHECK-NEXT: v_dual_mov_b32 v10, s30 :: v_dual_mov_b32 v11, s31
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_or_b32 s20, 0x80, s20
-; CHECK-NEXT: s_mov_b32 s21, s35
; CHECK-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; CHECK-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; CHECK-NEXT: v_dual_mov_b32 v2, s22 :: v_dual_mov_b32 v3, s23
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: v_dual_mov_b32 v25, s21 :: v_dual_mov_b32 v24, s20
+; CHECK-NEXT: v_dual_mov_b32 v16, s20 :: v_dual_mov_b32 v17, s21
+; CHECK-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
+; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_store_b128 off, v[4:7], off scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] offset:112 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[22:23], v[12:15] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[12:15] offset:96 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[24:25], v[0:3] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[16:19] offset:80 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17
-; CHECK-NEXT: s_or_b32 s16, 0x80, 64
-; CHECK-NEXT: s_mov_b32 s17, s35
-; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13
-; CHECK-NEXT: s_or_b32 s12, 0x80, 48
-; CHECK-NEXT: s_mov_b32 s13, s35
-; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; CHECK-NEXT: s_or_b32 s8, 0x80, 32
-; CHECK-NEXT: s_mov_b32 s9, s35
-; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5
-; CHECK-NEXT: s_or_b32 s4, 0x80, 16
-; CHECK-NEXT: s_mov_b32 s5, s35
; CHECK-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v3, s19
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: v_dual_mov_b32 v27, s17 :: v_dual_mov_b32 v26, s16
+; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13
; CHECK-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s15
-; CHECK-NEXT: v_dual_mov_b32 v29, s13 :: v_dual_mov_b32 v28, s12
-; CHECK-NEXT: v_dual_mov_b32 v31, s9 :: v_dual_mov_b32 v30, s8
-; CHECK-NEXT: v_dual_mov_b32 v33, s5 :: v_dual_mov_b32 v32, s4
+; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; CHECK-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
+; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5
; CHECK-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s7
; CHECK-NEXT: v_dual_mov_b32 v16, s0 :: v_dual_mov_b32 v17, s1
; CHECK-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v19, s3
-; CHECK-NEXT: flat_store_b128 v[26:27], v[0:3] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[0:3] offset:64 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[28:29], v[4:7] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[4:7] offset:48 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[30:31], v[8:11] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] offset:32 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[32:33], v[12:15] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[12:15] offset:16 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[34:35], v[16:19] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[16:19] scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[22:23] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:96 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:112 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[26:27] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:64 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[24:25] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:80 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[30:31] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:32 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[28:29] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:48 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[34:35] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[32:33] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:16 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt 0x0
; CHECK-NEXT: s_endpgm
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
index 6509d80..f88b1bf 100644
--- a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
@@ -12,7 +12,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; REGALLOC-GFX908-NEXT: liveins: $sgpr4_sgpr5
; REGALLOC-GFX908-NEXT: {{ $}}
; REGALLOC-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef %6:agpr_32
- ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def %25
+ ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def %25
; REGALLOC-GFX908-NEXT: [[COPY:%[0-9]+]]:av_128 = COPY %25
; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3735562 /* regdef:VReg_64 */, def %27
; REGALLOC-GFX908-NEXT: SI_SPILL_AV64_SAVE %27, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
@@ -37,7 +37,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; PEI-GFX908-NEXT: $sgpr12 = S_ADD_U32 $sgpr12, $sgpr9, implicit-def $scc, implicit-def $sgpr12_sgpr13_sgpr14_sgpr15
; PEI-GFX908-NEXT: $sgpr13 = S_ADDC_U32 $sgpr13, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr12_sgpr13_sgpr14_sgpr15
; PEI-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef renamable $agpr0
- ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
+ ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
; PEI-GFX908-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec
; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3735562 /* regdef:VReg_64 */, def renamable $vgpr0_vgpr1
; PEI-GFX908-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr12_sgpr13_sgpr14_sgpr15, 0, 0, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1 :: (store (s32) into %stack.0, addrspace 5)
@@ -61,7 +61,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; REGALLOC-GFX90A-NEXT: liveins: $sgpr4_sgpr5
; REGALLOC-GFX90A-NEXT: {{ $}}
; REGALLOC-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef %6:agpr_32
- ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def %23
+ ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def %23
; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:av_128_align2 = COPY %23
; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3997706 /* regdef:VReg_64_Align2 */, def %21
; REGALLOC-GFX90A-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY %21
@@ -80,7 +80,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; PEI-GFX90A-NEXT: liveins: $sgpr4_sgpr5
; PEI-GFX90A-NEXT: {{ $}}
; PEI-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef renamable $agpr0
- ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
+ ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
; PEI-GFX90A-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec
; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3997706 /* regdef:VReg_64_Align2 */, def renamable $vgpr2_vgpr3
; PEI-GFX90A-NEXT: GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $agpr0_agpr1_agpr2_agpr3, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir
index f1f2eb6..c9645c3 100644
--- a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir
+++ b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir
@@ -80,3 +80,151 @@ body: |
%4:vreg_128 = REG_SEQUENCE %3.sub0, %subreg.sub0, %3.sub1, %subreg.sub1, %3.sub2, %subreg.sub2, %3.sub3, %subreg.sub3
KILL implicit %4
...
+
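+# Reading aid for the cases below: a COPY from a REG_SEQUENCE subregister
+# (possibly through an intermediate full copy) is expected to be rewritten to
+# read the original input operand, or a subregister of it, whenever the copied
+# index maps onto a single input; a composite index that straddles two inputs
+# (sub1_sub2 below) must be left in place.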
+---
+name: copy_vreg_64_subreg_from_vgpr_reg_sequence
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence
+ ; GCN: liveins: $vgpr0, $vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY2]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+ %3:vgpr_32 = COPY %2.sub0
+ $vgpr0 = COPY %3
+...
+
+---
+name: copy_vreg_64_subreg_from_vgpr_reg_sequence_extra_copy
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_extra_copy
+ ; GCN: liveins: $vgpr0, $vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY3]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+ %3:vreg_64 = COPY %2
+ %4:vgpr_32 = COPY %3.sub0
+ $vgpr0 = COPY %4
+...
+
+---
+name: copy_av_64_subreg_from_vgpr_reg_sequence
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GCN-LABEL: name: copy_av_64_subreg_from_vgpr_reg_sequence
+ ; GCN: liveins: $vgpr0, $vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY3]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+ %3:av_64_align2 = COPY %2
+ %4:vgpr_32 = COPY %3.sub0
+ $vgpr0 = COPY %4
+...
+
+---
+name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub0_compose
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub0_compose
+ ; GCN: liveins: $vgpr0_vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]].sub0, %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY2]]
+ %0:vreg_64 = COPY $vgpr0_vgpr1
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64 = REG_SEQUENCE %0.sub0, %subreg.sub0, %1, %subreg.sub1
+ %3:vgpr_32 = COPY %2.sub0
+ $vgpr0 = COPY %3
+...
+
+---
+name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub1_compose
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub1_compose
+ ; GCN: liveins: $vgpr0_vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]].sub1, %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY2]]
+ %0:vreg_64 = COPY $vgpr0_vgpr1
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64 = REG_SEQUENCE %0.sub1, %subreg.sub0, %1, %subreg.sub1
+ %3:vgpr_32 = COPY %2.sub0
+ $vgpr0 = COPY %3
+...
+
+---
+name: copy_vreg_64_subreg_from_multiple_vgpr_reg_sequence
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_multiple_vgpr_reg_sequence
+ ; GCN: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub1_sub2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; GCN-NEXT: $vgpr0_vgpr1 = COPY [[COPY4]]
+ ; GCN-NEXT: $vgpr2_vgpr3 = COPY [[COPY5]]
+ ; GCN-NEXT: $vgpr4_vgpr5 = COPY [[COPY6]]
+ ; GCN-NEXT: $vgpr6 = COPY [[COPY7]]
+ ; GCN-NEXT: $vgpr6 = COPY [[COPY8]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vgpr_32 = COPY $vgpr2
+ %3:vgpr_32 = COPY $vgpr3
+ %4:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+ %5:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %3, %subreg.sub1
+ %6:vreg_128 = REG_SEQUENCE %4, %subreg.sub0_sub1, %5, %subreg.sub2_sub3
+ %7:vreg_64 = COPY %6.sub0_sub1
+ %8:vreg_64 = COPY %6.sub1_sub2
+ %9:vreg_64 = COPY %6.sub2_sub3
+ %10:vgpr_32 = COPY %6.sub2
+ %11:vgpr_32 = COPY %6.sub0
+ $vgpr0_vgpr1 = COPY %7
+ $vgpr2_vgpr3 = COPY %8
+ $vgpr4_vgpr5 = COPY %9
+ $vgpr6 = COPY %10
+ $vgpr6 = COPY %11
+...
diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
index f5e136a..b717f85 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
@@ -337,8 +337,7 @@ define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out)
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB8_0:
-; GFX942-NEXT: s_mov_b32 s4, 8
-; GFX942-NEXT: s_load_dword s0, s[0:1], s4 offset:0x2
+; GFX942-NEXT: s_load_dword s0, s[0:1], 0xa
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s0
@@ -353,8 +352,7 @@ define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out)
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB8_0:
-; GFX90a-NEXT: s_mov_b32 s0, 8
-; GFX90a-NEXT: s_load_dword s0, s[4:5], s0 offset:0x2
+; GFX90a-NEXT: s_load_dword s0, s[4:5], 0xa
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
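;
; Reading aid: the folded immediate is the old register base plus the encoded
; offset, 0xa = 8 + 0x2, so the preceding s_mov_b32 of the base is dropped.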
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index aa131ed..85a9aba 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -495,8 +495,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX900-NEXT: v_mov_b32_e32 v1, s35
; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0
; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX900-NEXT: s_movk_i32 s0, 0x5000
-; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, 0x5000, v0
; GFX900-NEXT: v_mov_b32_e32 v4, 0
; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX900-NEXT: v_mov_b32_e32 v5, 0
@@ -609,8 +608,8 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX10-NEXT: v_mov_b32_e32 v7, 0x7f
; GFX10-NEXT: v_and_b32_e32 v6, 0xfe000000, v1
; GFX10-NEXT: v_lshl_or_b32 v0, v0, 3, v6
-; GFX10-NEXT: v_add_co_u32 v0, s0, v0, s34
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, 0, s35, s0
+; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX10-NEXT: .LBB1_1: ; %for.cond.preheader
@@ -718,8 +717,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX90A-NEXT: v_mov_b32_e32 v2, s35
; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1
; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v2, vcc
-; GFX90A-NEXT: s_movk_i32 s0, 0x5000
-; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v1
+; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, 0x5000, v1
; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0
; GFX90A-NEXT: v_mov_b32_e32 v1, 0x7f
@@ -821,8 +819,8 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX11-NEXT: v_and_b32_e32 v6, 0xfe000000, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v0, 3, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v0, s0, v0, s34
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, s35, s0
+; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll
index ff90f1f..40f39a2 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX6,GFX6_PTRADD %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX6,GFX6_LEGACY %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti < %s | FileCheck --check-prefixes=GFX6 %s
; Test PTRADD handling in AMDGPUDAGToDAGISel::SelectMUBUF.
@@ -34,7 +33,3 @@ define amdgpu_kernel void @v_add_i32(ptr addrspace(1) %out, ptr addrspace(1) %in
store i32 %result, ptr addrspace(1) %out
ret void
}
-
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX6_LEGACY: {{.*}}
-; GFX6_PTRADD: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
index 7d3b19e..1c986a0 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX942,GFX942_PTRADD %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX942,GFX942_LEGACY %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 < %s | FileCheck --check-prefixes=GFX942 %s
; Tests for DAG combines and folds related to the ISD::PTRADD SelectionDAG
; opcode. The RUN lines use -disable-separate-const-offset-from-gep to disable
@@ -24,21 +23,13 @@ define i64 @global_load_ZTwoUses(ptr addrspace(1) %base, i64 %voffset) {
}
define i64 @global_load_gep_add_reassoc(ptr addrspace(1) %base, i64 %voffset) {
-; GFX942_PTRADD-LABEL: global_load_gep_add_reassoc:
-; GFX942_PTRADD: ; %bb.0:
-; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
-; GFX942_PTRADD-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24
-; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0)
-; GFX942_PTRADD-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX942_LEGACY-LABEL: global_load_gep_add_reassoc:
-; GFX942_LEGACY: ; %bb.0:
-; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[2:3], 0, v[0:1]
-; GFX942_LEGACY-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24
-; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0)
-; GFX942_LEGACY-NEXT: s_setpc_b64 s[30:31]
+; GFX942-LABEL: global_load_gep_add_reassoc:
+; GFX942: ; %bb.0:
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24
+; GFX942-NEXT: s_waitcnt vmcnt(0)
+; GFX942-NEXT: s_setpc_b64 s[30:31]
%add0 = add nuw nsw i64 %voffset, 24
%gep0 = getelementptr nuw inbounds i8, ptr addrspace(1) %base, i64 %add0
%l = load i64, ptr addrspace(1) %gep0, align 8
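; Reading aid: the PTRADD path reassociates (base + (voffset + 24)) into
; ((base + voffset) + 24), so the constant lands in the addressing mode:
;   v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]          ; base + voffset
;   global_load_dwordx2 v[0:1], v[0:1], off offset:24 ; +24 folded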
@@ -221,23 +212,14 @@ define ptr addrspace(1) @shl_neg_offset(ptr addrspace(1) %p, i64 %noffset, i64 %
; Check that offsets are folded into global addresses if possible. For example,
; this is relevant when using --amdgpu-lower-module-lds-strategy=table.
define ptr addrspace(1) @complextype_global_gep(i64 %offset) {
-; GFX942_PTRADD-LABEL: complextype_global_gep:
-; GFX942_PTRADD: ; %bb.0:
-; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942_PTRADD-NEXT: s_getpc_b64 s[0:1]
-; GFX942_PTRADD-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14
-; GFX942_PTRADD-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22
-; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], s[0:1], 0, v[0:1]
-; GFX942_PTRADD-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX942_LEGACY-LABEL: complextype_global_gep:
-; GFX942_LEGACY: ; %bb.0:
-; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942_LEGACY-NEXT: s_getpc_b64 s[0:1]
-; GFX942_LEGACY-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14
-; GFX942_LEGACY-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22
-; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
-; GFX942_LEGACY-NEXT: s_setpc_b64 s[30:31]
+; GFX942-LABEL: complextype_global_gep:
+; GFX942: ; %bb.0:
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_getpc_b64 s[0:1]
+; GFX942-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14
+; GFX942-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], s[0:1], 0, v[0:1]
+; GFX942-NEXT: s_setpc_b64 s[30:31]
%gep0 = getelementptr inbounds %complextype, ptr addrspace(1) @v0, i64 0, i32 1, i64 %offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 2
ret ptr addrspace(1) %gep1
@@ -430,36 +412,20 @@ define ptr @gep_disjoint_or(ptr %base) {
; Check that AssertAlign nodes between ptradd nodes don't block offset folding,
; taken from preload-implicit-kernargs.ll
define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) {
-; GFX942_PTRADD-LABEL: random_incorrect_offset:
-; GFX942_PTRADD: ; %bb.1:
-; GFX942_PTRADD-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
-; GFX942_PTRADD-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942_PTRADD-NEXT: s_branch .LBB21_0
-; GFX942_PTRADD-NEXT: .p2align 8
-; GFX942_PTRADD-NEXT: ; %bb.2:
-; GFX942_PTRADD-NEXT: .LBB21_0:
-; GFX942_PTRADD-NEXT: s_load_dword s0, s[4:5], 0xa
-; GFX942_PTRADD-NEXT: v_mov_b32_e32 v0, 0
-; GFX942_PTRADD-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942_PTRADD-NEXT: v_mov_b32_e32 v1, s0
-; GFX942_PTRADD-NEXT: global_store_dword v0, v1, s[8:9]
-; GFX942_PTRADD-NEXT: s_endpgm
-;
-; GFX942_LEGACY-LABEL: random_incorrect_offset:
-; GFX942_LEGACY: ; %bb.1:
-; GFX942_LEGACY-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
-; GFX942_LEGACY-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942_LEGACY-NEXT: s_branch .LBB21_0
-; GFX942_LEGACY-NEXT: .p2align 8
-; GFX942_LEGACY-NEXT: ; %bb.2:
-; GFX942_LEGACY-NEXT: .LBB21_0:
-; GFX942_LEGACY-NEXT: s_mov_b32 s0, 8
-; GFX942_LEGACY-NEXT: s_load_dword s0, s[4:5], s0 offset:0x2
-; GFX942_LEGACY-NEXT: v_mov_b32_e32 v0, 0
-; GFX942_LEGACY-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942_LEGACY-NEXT: v_mov_b32_e32 v1, s0
-; GFX942_LEGACY-NEXT: global_store_dword v0, v1, s[8:9]
-; GFX942_LEGACY-NEXT: s_endpgm
+; GFX942-LABEL: random_incorrect_offset:
+; GFX942: ; %bb.1:
+; GFX942-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_branch .LBB21_0
+; GFX942-NEXT: .p2align 8
+; GFX942-NEXT: ; %bb.2:
+; GFX942-NEXT: .LBB21_0:
+; GFX942-NEXT: s_load_dword s0, s[4:5], 0xa
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: v_mov_b32_e32 v1, s0
+; GFX942-NEXT: global_store_dword v0, v1, s[8:9]
+; GFX942-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2
%load = load i32, ptr addrspace(4) %gep
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll
index 1934ce3..e7c715f 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX942,GFX942_PTRADD %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX942,GFX942_LEGACY %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel < %s | FileCheck --check-prefixes=GFX942 %s
; Tests for undef and poison DAG folds for the ISD::PTRADD SelectionDAG opcode.
; If any additions are generated for these tests, the folds don't work.
@@ -44,6 +43,3 @@ define ptr @undef_base(ptr %p, i64 %offset) {
%gep1 = getelementptr i8, ptr undef, i64 %offset
ret ptr %gep1
}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX942_LEGACY: {{.*}}
-; GFX942_PTRADD: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll
index 9dd2502..f4f5a78 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll
@@ -1,14 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX8,GFX8_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX8,GFX8_LEGACY
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX942,GFX942_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX942,GFX942_LEGACY
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX10,GFX10_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX10,GFX10_LEGACY
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX11,GFX11_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX11,GFX11_LEGACY
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX12,GFX12_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX12,GFX12_LEGACY
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck %s -check-prefixes=GFX8
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck %s -check-prefixes=GFX942
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 < %s | FileCheck %s -check-prefixes=GFX10
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck %s -check-prefixes=GFX11
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck %s -check-prefixes=GFX12
; Tests for the ISD::PTRADD SelectionDAG opcode. This only tests 64-bit address
; spaces since PTRADD is currently only used for these.
@@ -511,15 +506,3 @@ entry:
store i32 %val, ptr addrspace(1) %gep.to, align 4
ret void
}
-
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10_LEGACY: {{.*}}
-; GFX10_PTRADD: {{.*}}
-; GFX11_LEGACY: {{.*}}
-; GFX11_PTRADD: {{.*}}
-; GFX12_LEGACY: {{.*}}
-; GFX12_PTRADD: {{.*}}
-; GFX8_LEGACY: {{.*}}
-; GFX8_PTRADD: {{.*}}
-; GFX942_LEGACY: {{.*}}
-; GFX942_PTRADD: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll b/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll
index 5d5aad7..566eb1e 100644
--- a/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll
+++ b/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll
@@ -7,16 +7,12 @@
@gv.fptr0 = external hidden unnamed_addr addrspace(4) constant ptr, align 4
-; GCN-LABEL: unreachable:
-; Function info:
-; codeLenInByte = 4
define internal fastcc void @unreachable() {
%fptr = load ptr, ptr addrspace(4) @gv.fptr0
call void %fptr()
unreachable
}
-
; GCN-LABEL: entry:
; GCN-NOT: s_swappc_b64
; GCN: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir
index d7b713a..0b4e662 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir
@@ -19,7 +19,7 @@ body: |
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
@@ -30,7 +30,7 @@ body: |
%4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub0_sub1:areg_128_align2 = COPY %4
%5.sub2_sub3 = IMPLICIT_DEF
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
@@ -172,7 +172,7 @@ body: |
; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]].sub2_sub3:areg_128_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub2_sub3
; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
@@ -183,7 +183,7 @@ body: |
undef %4.sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub0_sub1:areg_128_align2 = COPY %4.sub2_sub3
%5.sub2_sub3 = IMPLICIT_DEF
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
@@ -208,7 +208,7 @@ body: |
; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]].sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]].sub2
; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
@@ -219,7 +219,7 @@ body: |
undef %4.sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub1:areg_128_align2 = COPY %4.sub2
%5.sub2_sub3 = IMPLICIT_DEF
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir
index 57f611b..4c2ea2f 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir
@@ -17,7 +17,7 @@ body: |
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -26,7 +26,7 @@ body: |
%3:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
%4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub0_sub1:areg_128_align2 = COPY %4
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
...
@@ -47,7 +47,7 @@ body: |
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1)
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX4_]].sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -56,7 +56,7 @@ body: |
%3:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1)
%4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3.sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec
undef %5.sub0_sub1:areg_128_align2 = COPY %4
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5
GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
...
@@ -151,7 +151,7 @@ body: |
; CHECK-NEXT: dead %other_use:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1
; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_2:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_2]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -163,7 +163,7 @@ body: |
%other_use:vreg_64_align2 = COPY %5.sub0_sub1
%6:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %5.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec
undef %8.sub0_sub1:areg_128_align2 = COPY %6
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %8:areg_128_align2
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %8:areg_128_align2
GLOBAL_STORE_DWORDX4 %0, %8, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
@@ -231,7 +231,7 @@ body: |
; CHECK-NEXT: dead %other_use1:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub2_sub3
; CHECK-NEXT: dead %other_use2:vreg_64 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub1_sub2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]]
; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: SI_RETURN
%0:vreg_64_align2 = COPY $vgpr4_vgpr5
@@ -245,7 +245,7 @@ body: |
%other_use1:vreg_64_align2 = COPY %4.sub2_sub3
%other_use2:vreg_64 = COPY %4.sub1_sub2
%6:areg_128_align2 = COPY %4
- INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %6:areg_128_align2
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %6:areg_128_align2
GLOBAL_STORE_DWORDX4 %0, %6, 0, 0, implicit $exec :: (store (s128), addrspace 1)
SI_RETURN
...
diff --git a/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll b/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll
index 65a99d0..480eb0d 100644
--- a/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll
+++ b/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll
@@ -52,11 +52,12 @@ define amdgpu_kernel void @local_store_i55(ptr addrspace(3) %ptr, i55 %arg) #0 {
; HAWAII-LABEL: local_store_i55:
; HAWAII: ; %bb.0:
; HAWAII-NEXT: s_add_i32 s12, s12, s17
-; HAWAII-NEXT: s_or_b32 s0, s8, 14
-; HAWAII-NEXT: s_mov_b32 flat_scratch_lo, s13
; HAWAII-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; HAWAII-NEXT: s_add_u32 s0, s8, 14
+; HAWAII-NEXT: s_addc_u32 s1, s9, 0
; HAWAII-NEXT: v_mov_b32_e32 v0, s0
-; HAWAII-NEXT: v_mov_b32_e32 v1, s9
+; HAWAII-NEXT: s_mov_b32 flat_scratch_lo, s13
+; HAWAII-NEXT: v_mov_b32_e32 v1, s1
; HAWAII-NEXT: flat_load_ubyte v0, v[0:1]
; HAWAII-NEXT: s_load_dword s2, s[8:9], 0x0
; HAWAII-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x2
@@ -74,25 +75,27 @@ define amdgpu_kernel void @local_store_i55(ptr addrspace(3) %ptr, i55 %arg) #0 {
;
; FIJI-LABEL: local_store_i55:
; FIJI: ; %bb.0:
+; FIJI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8
; FIJI-NEXT: s_add_i32 s12, s12, s17
-; FIJI-NEXT: s_or_b32 s0, s8, 14
-; FIJI-NEXT: s_mov_b32 flat_scratch_lo, s13
; FIJI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; FIJI-NEXT: v_mov_b32_e32 v0, s0
-; FIJI-NEXT: v_mov_b32_e32 v1, s9
-; FIJI-NEXT: flat_load_ubyte v0, v[0:1]
-; FIJI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8
-; FIJI-NEXT: s_load_dword s2, s[8:9], 0x0
+; FIJI-NEXT: s_mov_b32 flat_scratch_lo, s13
; FIJI-NEXT: s_mov_b32 m0, -1
; FIJI-NEXT: s_waitcnt lgkmcnt(0)
-; FIJI-NEXT: s_and_b32 s3, s1, 0xffff
-; FIJI-NEXT: v_mov_b32_e32 v1, s2
+; FIJI-NEXT: s_and_b32 s4, s1, 0xffff
+; FIJI-NEXT: s_add_u32 s2, s8, 14
+; FIJI-NEXT: s_addc_u32 s3, s9, 0
+; FIJI-NEXT: v_mov_b32_e32 v0, s2
+; FIJI-NEXT: v_mov_b32_e32 v1, s3
+; FIJI-NEXT: flat_load_ubyte v0, v[0:1]
+; FIJI-NEXT: s_load_dword s2, s[8:9], 0x0
; FIJI-NEXT: v_mov_b32_e32 v2, s1
; FIJI-NEXT: v_mov_b32_e32 v3, s0
+; FIJI-NEXT: s_waitcnt lgkmcnt(0)
+; FIJI-NEXT: v_mov_b32_e32 v1, s2
; FIJI-NEXT: ds_write_b16 v1, v2 offset:4
; FIJI-NEXT: s_waitcnt vmcnt(0)
; FIJI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; FIJI-NEXT: v_or_b32_e32 v0, s3, v0
+; FIJI-NEXT: v_or_b32_e32 v0, s4, v0
; FIJI-NEXT: v_bfe_u32 v0, v0, 16, 7
; FIJI-NEXT: ds_write_b8 v1, v0 offset:6
; FIJI-NEXT: ds_write_b32 v1, v3
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
new file mode 100644
index 0000000..0f84b76
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; V7M-LABEL: mask_pair:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: mask_pair:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: mask_pair:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: mask_pair:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: bx lr
+ %shl = shl nsw i32 -1, %y
+ %and = and i32 %shl, %x
+ ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; V7M-LABEL: mask_pair_64:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: and.w r0, r0, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: mask_pair_64:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: mask_pair_64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: and.w r0, r0, r12
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: mask_pair_64:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shl = shl nsw i64 -1, %y
+ %and = and i64 %shl, %x
+ ret i64 %and
+}
diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll
new file mode 100644
index 0000000..77deaa5
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/extract-bits.ll
@@ -0,0 +1,4591 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M
+
+; Patterns:
+; a) (x >> start) & (1 << nbits) - 1
+; b) (x >> start) & ~(-1 << nbits)
+; c) (x >> start) & (-1 >> (32 - nbits))
+; d) (x >> start) << (32 - nbits) >> (32 - nbits)
+; are equivalent.
+
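
The four patterns above all compute the same value: the nbits-wide bit field of x starting at bit position start. As a cross-check, here is a minimal C sketch (not part of the test file; the helper names and the test constant are ours) that verifies the equivalence exhaustively for 32-bit values. Patterns c and d need nbits >= 1 so that 32 - nbits is a valid shift amount, and pattern a needs nbits <= 31 so that 1 << nbits does not overflow, so the loop stays in that range; the IR tests below exercise the same shapes plus arithmetic-shift, zero-extended-index, load, and commuted variants.

    #include <assert.h>
    #include <stdint.h>

    /* Each helper returns the nbits-wide field of x starting at bit
       position start, spelled as one of the four patterns above. */
    static uint32_t bextr_a(uint32_t x, unsigned start, unsigned nbits) {
        return (x >> start) & ((1u << nbits) - 1u);            /* pattern a */
    }
    static uint32_t bextr_b(uint32_t x, unsigned start, unsigned nbits) {
        return (x >> start) & ~(~0u << nbits);                 /* pattern b */
    }
    static uint32_t bextr_c(uint32_t x, unsigned start, unsigned nbits) {
        return (x >> start) & (~0u >> (32 - nbits));           /* pattern c */
    }
    static uint32_t bextr_d(uint32_t x, unsigned start, unsigned nbits) {
        return ((x >> start) << (32 - nbits)) >> (32 - nbits); /* pattern d */
    }

    int main(void) {
        uint32_t x = 0xDEADBEEFu; /* arbitrary test value */
        for (unsigned start = 0; start < 32; ++start)
            for (unsigned nbits = 1; nbits <= 31; ++nbits) {
                uint32_t a = bextr_a(x, start, nbits);
                assert(a == bextr_b(x, start, nbits));
                assert(a == bextr_c(x, start, nbits));
                assert(a == bextr_d(x, start, nbits));
            }
        return 0;
    }
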
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: subs r1, r1, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a0_arithmetic:
+; V7M: @ %bb.0:
+; V7M-NEXT: asrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a0_arithmetic:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, asr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a0_arithmetic:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: asrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a0_arithmetic:
+; V6M: @ %bb.0:
+; V6M-NEXT: asrs r0, r1
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: subs r1, r1, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %shifted = ashr i32 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: subs r1, r1, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %conv = zext i8 %numlowbits to i32
+ %onebit = shl i32 1, %conv
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: lsrs r3, r1
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: ands r0, r3
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %shifted = lshr i32 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: lsrs r3, r1
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: ands r0, r3
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %conv = zext i8 %numlowbits to i32
+ %onebit = shl i32 1, %conv
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_a4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: movs r1, #1
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_a4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r2, r3, r12, lsl r2
+; V7A-NEXT: and r0, r2, r0, lsr r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_a4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsls r1, r2
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_a4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: subs r1, r1, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %shifted, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: rsb.w r4, r12, #32
+; V7M-NEXT: subs.w r3, r12, #32
+; V7M-NEXT: lsr.w r4, lr, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r4, lr, r3
+; V7M-NEXT: lsl.w r3, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs r3, #1
+; V7M-NEXT: sbc r12, r4, #0
+; V7M-NEXT: rsb.w r4, r2, #32
+; V7M-NEXT: lsl.w r4, r1, r4
+; V7M-NEXT: orrs r0, r4
+; V7M-NEXT: subs.w r4, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r4
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: and.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r3, r12, #32
+; V7A-NEXT: subs r4, r12, #32
+; V7A-NEXT: lsr r3, lr, r3
+; V7A-NEXT: lslpl r3, lr, r4
+; V7A-NEXT: lsl r4, lr, r12
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs r4, r4, #1
+; V7A-NEXT: sbc r12, r3, #0
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r0, r4, r0
+; V7A-NEXT: and r1, r12, r1
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: rsb.w r4, r12, #32
+; V7A-T-NEXT: subs.w r3, r12, #32
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r3
+; V7A-T-NEXT: lsl.w r3, lr, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: subs r3, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r0, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #12
+; V6M-NEXT: sub sp, #12
+; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: ldr r2, [sp, #32]
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: subs r5, r0, #1
+; V6M-NEXT: sbcs r4, r7
+; V6M-NEXT: mov r0, r6
+; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: add sp, #12
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a0_arithmetic:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: rsb.w r4, r12, #32
+; V7M-NEXT: subs.w r3, r12, #32
+; V7M-NEXT: lsr.w r4, lr, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r4, lr, r3
+; V7M-NEXT: lsl.w r3, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs r3, #1
+; V7M-NEXT: sbc r12, r4, #0
+; V7M-NEXT: rsb.w r4, r2, #32
+; V7M-NEXT: lsl.w r4, r1, r4
+; V7M-NEXT: orrs r0, r4
+; V7M-NEXT: subs.w r4, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: asrpl.w r0, r1, r4
+; V7M-NEXT: asr.w r2, r1, r2
+; V7M-NEXT: and.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: asrpl r2, r1, #31
+; V7M-NEXT: and.w r1, r12, r2
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a0_arithmetic:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r3, r12, #32
+; V7A-NEXT: subs r4, r12, #32
+; V7A-NEXT: lsr r3, lr, r3
+; V7A-NEXT: lslpl r3, lr, r4
+; V7A-NEXT: lsl r4, lr, r12
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs r4, r4, #1
+; V7A-NEXT: sbc r12, r3, #0
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: asr r2, r1, r2
+; V7A-NEXT: asrpl r0, r1, r3
+; V7A-NEXT: asrpl r2, r1, #31
+; V7A-NEXT: and r0, r4, r0
+; V7A-NEXT: and r1, r12, r2
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a0_arithmetic:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: rsb.w r4, r12, #32
+; V7A-T-NEXT: subs.w r3, r12, #32
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r3
+; V7A-T-NEXT: lsl.w r3, lr, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: subs r3, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r0, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: asrpl.w r0, r1, r4
+; V7A-T-NEXT: asr.w r2, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: asrpl r2, r1, #31
+; V7A-T-NEXT: and.w r1, r12, r2
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a0_arithmetic:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #12
+; V6M-NEXT: sub sp, #12
+; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: ldr r2, [sp, #32]
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: subs r5, r0, #1
+; V6M-NEXT: sbcs r4, r7
+; V6M-NEXT: mov r0, r6
+; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_lasr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: add sp, #12
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %shifted = ashr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: rsb.w r4, r3, #32
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: subs.w r12, r3, #32
+; V7M-NEXT: lsl.w r3, lr, r3
+; V7M-NEXT: lsr.w r4, lr, r4
+; V7M-NEXT: lsr.w r0, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r4, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs r3, #1
+; V7M-NEXT: sbc r12, r4, #0
+; V7M-NEXT: rsb.w r4, r2, #32
+; V7M-NEXT: lsl.w r4, r1, r4
+; V7M-NEXT: orrs r0, r4
+; V7M-NEXT: subs.w r4, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r4
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: and.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: rsb r12, r3, #32
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: subs r4, r3, #32
+; V7A-NEXT: lsl r3, lr, r3
+; V7A-NEXT: lsr r12, lr, r12
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: lslpl r12, lr, r4
+; V7A-NEXT: rsb r4, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: subs r3, r3, #1
+; V7A-NEXT: sbc r12, r12, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r4
+; V7A-NEXT: subs r4, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r4
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: and r1, r12, r1
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r4, r3, #32
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: subs.w r12, r3, #32
+; V7A-T-NEXT: lsl.w r3, lr, r3
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: lsr.w r0, r0, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: subs r3, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r0, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #12
+; V6M-NEXT: sub sp, #12
+; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: mov r2, r3
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: subs r5, r0, #1
+; V6M-NEXT: sbcs r4, r7
+; V6M-NEXT: mov r0, r6
+; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: add sp, #12
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %conv = zext i8 %numlowbits to i64
+ %onebit = shl i64 1, %conv
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: rsb.w r1, r12, #32
+; V7M-NEXT: subs.w r3, r12, #32
+; V7M-NEXT: lsr.w r1, lr, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, lr, r3
+; V7M-NEXT: lsl.w r3, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs.w lr, r3, #1
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: sbc r12, r1, #0
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: orrs r0, r1
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r1
+; V7M-NEXT: lsr.w r1, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r0, r0, lr
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_a2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r6, lr}
+; V7A-NEXT: push {r4, r5, r6, lr}
+; V7A-NEXT: ldr r1, [sp, #16]
+; V7A-NEXT: mov r3, #1
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: ldr r5, [r0, #4]
+; V7A-NEXT: rsb r0, r1, #32
+; V7A-NEXT: subs r4, r1, #32
+; V7A-NEXT: lsl r1, r3, r1
+; V7A-NEXT: lsr r0, r3, r0
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lslpl r0, r3, r4
+; V7A-NEXT: subs r1, r1, #1
+; V7A-NEXT: sbc r3, r0, #0
+; V7A-NEXT: lsr r0, r6, r2
+; V7A-NEXT: rsb r6, r2, #32
+; V7A-NEXT: orr r0, r0, r5, lsl r6
+; V7A-NEXT: subs r6, r2, #32
+; V7A-NEXT: lsrpl r0, r5, r6
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: lsr r1, r5, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r4, r5, r6, pc}
+;
+; V7A-T-LABEL: bextr64_a2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: movs r3, #1
+; V7A-T-NEXT: ldrd lr, r1, [r0]
+; V7A-T-NEXT: rsb.w r4, r12, #32
+; V7A-T-NEXT: subs.w r0, r12, #32
+; V7A-T-NEXT: lsr.w r4, r3, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, r3, r0
+; V7A-T-NEXT: lsl.w r0, r3, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsr.w r3, lr, r2
+; V7A-T-NEXT: subs r0, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r3, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r3, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #4
+; V6M-NEXT: sub sp, #4
+; V6M-NEXT: str r2, [sp] @ 4-byte Spill
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: ldr r2, [sp, #24]
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r6, r1
+; V6M-NEXT: subs r4, r0, #1
+; V6M-NEXT: sbcs r6, r7
+; V6M-NEXT: ldm r5!, {r0, r1}
+; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: ands r1, r6
+; V6M-NEXT: add sp, #4
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %val = load i64, ptr %w
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs.w lr, r2, #1
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: sbc r12, r3, #0
+; V7M-NEXT: rsb.w r3, r1, #32
+; V7M-NEXT: lsl.w r3, r2, r3
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r1, #32
+; V7M-NEXT: lsr.w r1, r2, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r2, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r0, r0, lr
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_a3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r6, lr}
+; V7A-NEXT: push {r4, r5, r6, lr}
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: mov r3, #1
+; V7A-NEXT: ldr r5, [r0, #4]
+; V7A-NEXT: rsb r0, r2, #32
+; V7A-NEXT: subs r4, r2, #32
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lsr r0, r3, r0
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl r0, r3, r4
+; V7A-NEXT: subs r3, r2, #1
+; V7A-NEXT: sbc r0, r0, #0
+; V7A-NEXT: lsr r2, r5, r1
+; V7A-NEXT: subs r4, r1, #32
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r2, r0, r2
+; V7A-NEXT: lsr r0, r6, r1
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: orr r0, r0, r5, lsl r1
+; V7A-NEXT: mov r1, r2
+; V7A-NEXT: lsrpl r0, r5, r4
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: pop {r4, r5, r6, pc}
+;
+; V7A-T-LABEL: bextr64_a3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: lsl.w r2, lr, r2
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: ldrd r12, r0, [r0]
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs.w lr, r2, #1
+; V7A-T-NEXT: sbc r2, r4, #0
+; V7A-T-NEXT: lsr.w r4, r0, r1
+; V7A-T-NEXT: subs.w r3, r1, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r4, #0
+; V7A-T-NEXT: and.w r2, r2, r4
+; V7A-T-NEXT: rsb.w r4, r1, #32
+; V7A-T-NEXT: lsr.w r1, r12, r1
+; V7A-T-NEXT: lsl.w r4, r0, r4
+; V7A-T-NEXT: orr.w r1, r1, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r1, r0, r3
+; V7A-T-NEXT: and.w r0, lr, r1
+; V7A-T-NEXT: mov r1, r2
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #4
+; V6M-NEXT: sub sp, #4
+; V6M-NEXT: str r1, [sp] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: subs r4, r0, #1
+; V6M-NEXT: sbcs r5, r7
+; V6M-NEXT: ldm r6!, {r0, r1}
+; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: add sp, #4
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %val = load i64, ptr %w
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %conv = zext i8 %numlowbits to i64
+ %onebit = shl i64 1, %conv
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_a4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: mov.w lr, #1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: rsb.w r4, r12, #32
+; V7M-NEXT: subs.w r3, r12, #32
+; V7M-NEXT: lsr.w r4, lr, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r4, lr, r3
+; V7M-NEXT: lsl.w r3, lr, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: subs r3, #1
+; V7M-NEXT: sbc r12, r4, #0
+; V7M-NEXT: rsb.w r4, r2, #32
+; V7M-NEXT: lsl.w r4, r1, r4
+; V7M-NEXT: orrs r0, r4
+; V7M-NEXT: subs.w r4, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r4
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: and.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: and.w r1, r1, r12
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r3, r12, #32
+; V7A-NEXT: subs r4, r12, #32
+; V7A-NEXT: lsr r3, lr, r3
+; V7A-NEXT: lslpl r3, lr, r4
+; V7A-NEXT: lsl r4, lr, r12
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs r4, r4, #1
+; V7A-NEXT: sbc r12, r3, #0
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r0, r0, r4
+; V7A-NEXT: and r1, r1, r12
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: rsb.w r4, r12, #32
+; V7A-T-NEXT: subs.w r3, r12, #32
+; V7A-T-NEXT: lsr.w r4, lr, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r4, lr, r3
+; V7A-T-NEXT: lsl.w r3, lr, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: subs r3, #1
+; V7A-T-NEXT: sbc r12, r4, #0
+; V7A-T-NEXT: rsb.w r4, r2, #32
+; V7A-T-NEXT: lsl.w r4, r1, r4
+; V7A-T-NEXT: orrs r0, r4
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, r7, lr}
+; V6M-NEXT: push {r4, r5, r6, r7, lr}
+; V6M-NEXT: .pad #12
+; V6M-NEXT: sub sp, #12
+; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r7, #0
+; V6M-NEXT: ldr r2, [sp, #32]
+; V6M-NEXT: mov r1, r7
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: subs r5, r0, #1
+; V6M-NEXT: sbcs r4, r7
+; V6M-NEXT: mov r0, r6
+; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: add sp, #12
+; V6M-NEXT: pop {r4, r5, r6, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %shifted, %mask ; swapped order
+ ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last (see the C sketch after bextr64_32_a2 below).
+define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsls r2, r1
+; V7M-NEXT: subs r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r1, r2, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: mov r1, #1
+; V7A-NEXT: lsl r1, r1, r12
+; V7A-NEXT: subs r2, r12, #32
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: sub r1, r1, #1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: subs.w r2, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: ldr r2, [sp, #8]
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %shifted
+ %res = trunc i64 %masked to i32
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_a1:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_a1:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: add r12, r3, lr, lsl r12
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: and r0, r12, r0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_a1:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_a1:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %truncshifted = trunc i64 %shifted to i32
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %truncshifted
+ ret i32 %masked
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_a2:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_a2:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: mov lr, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: add r12, r3, lr, lsl r12
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: and r0, r12, r0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_a2:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_a2:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %zextmask = zext i32 %mask to i64
+ %masked = and i64 %zextmask, %shifted
+ %truncmasked = trunc i64 %masked to i32
+ ret i32 %truncmasked
+}
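
The three bextr64_32 variants above differ only in where the truncation to 32 bits happens. A short C sketch (function names are ours, mirroring the test names) spells out the three orderings and checks that they agree whenever numlowbits is between 1 and 31, the range where the 32-bit shift 1 << numlowbits is well defined. Instruction selection still sees three different IR shapes and may lower them differently, as the checks above show: on these targets bextr64_32_a1 and bextr64_32_a2 happen to produce identical code, while bextr64_32_a0 has to materialize a full 64-bit mask.

    #include <assert.h>
    #include <stdint.h>

    /* a0: shift, mask and AND entirely in 64 bits; truncate last. */
    static uint32_t bextr64_32_a0(uint64_t val, unsigned skip, unsigned low) {
        return (uint32_t)((val >> skip) & ((1ull << low) - 1));
    }
    /* a1: shift in 64 bits, truncate to 32, then mask in 32 bits. */
    static uint32_t bextr64_32_a1(uint64_t val, unsigned skip, unsigned low) {
        return (uint32_t)(val >> skip) & ((1u << low) - 1u);
    }
    /* a2: 32-bit mask zero-extended to 64, AND in 64 bits, truncate. */
    static uint32_t bextr64_32_a2(uint64_t val, unsigned skip, unsigned low) {
        return (uint32_t)((uint64_t)((1u << low) - 1u) & (val >> skip));
    }

    int main(void) {
        uint64_t val = 0x0123456789ABCDEFull; /* arbitrary test value */
        for (unsigned skip = 0; skip < 64; ++skip)
            for (unsigned low = 1; low <= 31; ++low) {
                uint32_t r = bextr64_32_a0(val, skip, low);
                assert(r == bextr64_32_a1(val, skip, low));
                assert(r == bextr64_32_a2(val, skip, low));
            }
        return 0;
    }
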
+
+; ---------------------------------------------------------------------------- ;
+; Pattern b. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: mvns r1, r1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: mvns r1, r1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %conv = zext i8 %numlowbits to i32
+ %notmask = shl i32 -1, %conv
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #0
+; V6M-NEXT: mvns r3, r3
+; V6M-NEXT: lsls r3, r2
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bics r0, r3
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %shifted = lshr i32 %val, %numskipbits
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #0
+; V6M-NEXT: mvns r3, r3
+; V6M-NEXT: lsls r3, r2
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bics r0, r3
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %conv = zext i8 %numlowbits to i32
+ %notmask = shl i32 -1, %conv
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_b4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_b4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_b4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_b4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: mvns r1, r1
+; V6M-NEXT: lsls r1, r2
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %shifted, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsl.w r3, r2, r12
+; V7M-NEXT: subs.w lr, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r2, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: bics r1, r2
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: subs lr, r12, #32
+; V7A-NEXT: lsl r2, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: lslpl r3, r3, lr
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r7, lr}
+; V7A-T-NEXT: push {r4, r5, r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #16]
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r5, r0, r3
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: subs.w lr, r12, #32
+; V7A-T-NEXT: lsl.w r0, r3, r12
+; V7A-T-NEXT: itt pl
+; V7A-T-NEXT: lslpl.w r3, r3, lr
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r5, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: bic.w r0, r5, r0
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: ldr r2, [sp, #16]
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r4, r0
+; V6M-NEXT: bics r5, r1
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsr.w r12, r0, r2
+; V7M-NEXT: rsb.w r0, r2, #32
+; V7M-NEXT: lsl.w r0, r1, r0
+; V7M-NEXT: orr.w r12, r12, r0
+; V7M-NEXT: subs.w r0, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r12, r1, r0
+; V7M-NEXT: lsr.w r0, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: subs.w r1, r3, #32
+; V7M-NEXT: lsl.w r3, r2, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r2, r1
+; V7M-NEXT: bic.w r1, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: bic.w r0, r12, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_b1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r12, r0, r2
+; V7A-NEXT: rsb r0, r2, #32
+; V7A-NEXT: orr r12, r12, r1, lsl r0
+; V7A-NEXT: subs r0, r2, #32
+; V7A-NEXT: lsrpl r12, r1, r0
+; V7A-NEXT: lsr r0, r1, r2
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: subs r1, r3, #32
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: lsl r3, r2, r3
+; V7A-NEXT: lslpl r2, r2, r1
+; V7A-NEXT: bic r1, r0, r2
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: bic r0, r12, r3
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_b1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsr.w r12, r0, r2
+; V7A-T-NEXT: rsb.w r0, r2, #32
+; V7A-T-NEXT: lsl.w r0, r1, r0
+; V7A-T-NEXT: orr.w r12, r12, r0
+; V7A-T-NEXT: subs.w r0, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r12, r1, r0
+; V7A-T-NEXT: lsr.w r0, r1, r2
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs.w r1, r3, #32
+; V7A-T-NEXT: lsl.w r3, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r2, r1
+; V7A-T-NEXT: bic.w r1, r0, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: bic.w r0, r12, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_b1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r4, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r6, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r6, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %conv = zext i8 %numlowbits to i64
+ %notmask = shl i64 -1, %conv
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: orrs r0, r1
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r1
+; V7M-NEXT: lsr.w r1, r3, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsl.w r3, r2, r12
+; V7M-NEXT: subs.w lr, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r2, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: bics r1, r2
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: ldrd r0, r1, [r0]
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: subs lr, r12, #32
+; V7A-NEXT: lsl r2, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: lslpl r3, r3, lr
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: ldrd r0, r3, [r0]
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: lsl.w r1, r3, r1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: orrs r0, r1
+; V7A-T-NEXT: subs.w r1, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r3, r1
+; V7A-T-NEXT: lsr.w r1, r3, r2
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: lsl.w r2, r3, r12
+; V7A-T-NEXT: subs.w lr, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r3, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: bics r0, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bextr64_b2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: ldr r2, [sp, #16]
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r4, r0
+; V6M-NEXT: bics r5, r1
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %shifted = lshr i64 %val, %numskipbits
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: ldrd r12, r0, [r0]
+; V7M-NEXT: rsb.w r3, r1, #32
+; V7M-NEXT: lsl.w lr, r0, r3
+; V7M-NEXT: lsr.w r3, r12, r1
+; V7M-NEXT: orr.w r12, r3, lr
+; V7M-NEXT: subs.w r3, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r12, r0, r3
+; V7M-NEXT: lsr.w r0, r0, r1
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r1
+; V7M-NEXT: bic.w r1, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: bic.w r0, r12, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldm r0, {r0, r3}
+; V7A-NEXT: lsr r12, r0, r1
+; V7A-NEXT: rsb r0, r1, #32
+; V7A-NEXT: orr r12, r12, r3, lsl r0
+; V7A-NEXT: subs r0, r1, #32
+; V7A-NEXT: lsrpl r12, r3, r0
+; V7A-NEXT: lsr r0, r3, r1
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: subs r1, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r1
+; V7A-NEXT: bic r1, r0, r3
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r0, r12, r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_b3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: ldrd r12, r3, [r0]
+; V7A-T-NEXT: rsb.w r0, r1, #32
+; V7A-T-NEXT: lsl.w lr, r3, r0
+; V7A-T-NEXT: lsr.w r0, r12, r1
+; V7A-T-NEXT: orr.w r12, r0, lr
+; V7A-T-NEXT: subs.w r0, r1, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r12, r3, r0
+; V7A-T-NEXT: lsr.w r0, r3, r1
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs.w r1, r2, #32
+; V7A-T-NEXT: lsl.w r2, r3, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r1
+; V7A-T-NEXT: bic.w r1, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: bic.w r0, r12, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bextr64_b3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r4, r2
+; V6M-NEXT: mov r2, r1
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r6, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r6, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %val = load i64, ptr %w
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %conv = zext i8 %numlowbits to i64
+ %notmask = shl i64 -1, %conv
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_b4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsl.w r3, r2, r12
+; V7M-NEXT: subs.w lr, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r2, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: bics r1, r2
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsrpl r0, r1, r3
+; V7A-NEXT: lsr r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: subs lr, r12, #32
+; V7A-NEXT: lsl r2, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: lslpl r3, r3, lr
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r7, lr}
+; V7A-T-NEXT: push {r4, r5, r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #16]
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r5, r0, r3
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: subs.w lr, r12, #32
+; V7A-T-NEXT: lsl.w r0, r3, r12
+; V7A-T-NEXT: itt pl
+; V7A-T-NEXT: lslpl.w r3, r3, lr
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs.w r4, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r5, r1, r4
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: bic.w r0, r5, r0
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_b4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: ldr r2, [sp, #16]
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r4, r0
+; V6M-NEXT: bics r5, r1
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %shifted, %mask ; swapped order
+ ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
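+; Illustrative C form (a sketch, not FileCheck input; names mirror the IR below):
+;   (uint32_t)((val >> numskipbits) & ~(~0ull << numlowbits))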
+define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldrb.w r1, [sp]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsls r2, r1
+; V7M-NEXT: subs r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: bics r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldrb r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: lsl r1, r1, r12
+; V7A-NEXT: subs r2, r12, #32
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: bic r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsr.w r12, r0, r2
+; V7A-T-NEXT: rsb.w r0, r2, #32
+; V7A-T-NEXT: ldrb.w r3, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r0, r1, r0
+; V7A-T-NEXT: orr.w r0, r0, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: mov.w r1, #-1
+; V7A-T-NEXT: lsls r1, r3
+; V7A-T-NEXT: subs.w r2, r3, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: add r1, sp, #8
+; V6M-NEXT: ldrb r2, [r1]
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r4, r0
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: pop {r4, pc}
+ %shiftedval = lshr i64 %val, %numskipbits
+ %widenumlowbits = zext i8 %numlowbits to i64
+ %notmask = shl nsw i64 -1, %widenumlowbits
+ %mask = xor i64 %notmask, -1
+ %wideres = and i64 %shiftedval, %mask
+ %res = trunc i64 %wideres to i32
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
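+; Illustrative C form (a sketch, not FileCheck input):
+;   (uint32_t)(val >> numskipbits) & ~(~0u << numlowbits)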
+define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_b1:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldrb.w r1, [sp]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_b1:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldrb r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r12
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_b1:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldrb.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: mov.w r1, #-1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_b1:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: add r1, sp, #8
+; V6M-NEXT: ldrb r1, [r1]
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: pop {r7, pc}
+ %shiftedval = lshr i64 %val, %numskipbits
+ %truncshiftedval = trunc i64 %shiftedval to i32
+ %widenumlowbits = zext i8 %numlowbits to i32
+ %notmask = shl nsw i32 -1, %widenumlowbits
+ %mask = xor i32 %notmask, -1
+ %res = and i32 %truncshiftedval, %mask
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
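+; Illustrative C form (a sketch, not FileCheck input):
+;   (uint32_t)((val >> numskipbits) & (uint64_t)~(~0u << numlowbits))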
+define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_b2:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldrb.w r1, [sp]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_b2:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldrb r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: bic r0, r0, r1, lsl r12
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_b2:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldrb.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: mov.w r1, #-1
+; V7A-T-NEXT: lsl.w r1, r1, r12
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_b2:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: add r1, sp, #8
+; V6M-NEXT: ldrb r1, [r1]
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: pop {r7, pc}
+ %shiftedval = lshr i64 %val, %numskipbits
+ %widenumlowbits = zext i8 %numlowbits to i32
+ %notmask = shl nsw i32 -1, %widenumlowbits
+ %mask = xor i32 %notmask, -1
+ %zextmask = zext i32 %mask to i64
+ %wideres = and i64 %shiftedval, %zextmask
+ %res = trunc i64 %wideres to i32
+ ret i32 %res
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c. 32-bit.
+; ---------------------------------------------------------------------------- ;
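+; Pattern c builds the mask by right-shifting all-ones. In C terms, an
+; illustrative sketch (assuming 0 < numlowbits <= 32, and not part of the
+; FileCheck output):
+;   uint32_t bextr32_c(uint32_t val, uint32_t numskipbits, uint32_t numlowbits) {
+;     return (val >> numskipbits) & (~0u >> (32 - numlowbits));
+;   }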
+
+define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #32
+; V6M-NEXT: subs r1, r1, r2
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #32
+; V6M-NEXT: subs r1, r1, r2
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %shifted
+ ret i32 %masked
+}
+
+define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_c4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_c4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_c4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_c4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %shifted, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: ldr.w r12, [sp]
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsr.w r3, r2, r3
+; V7M-NEXT: rsbs.w r12, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r2, r2, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr r12, [sp, #16]
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r5, r1, r2
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r4, r12, #64
+; V7A-NEXT: rsbs lr, r12, #32
+; V7A-NEXT: lsr r4, r3, r4
+; V7A-NEXT: lsrpl r3, r3, lr
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: movwpl r5, #0
+; V7A-NEXT: and r12, r4, r5
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: mov r1, r12
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #-1
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orrs r0, r3
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r3
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsbs.w r2, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r2
+; V7A-T-NEXT: rsb.w r2, r12, #64
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: lsr.w r2, lr, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bextr64_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: ldr r0, [sp, #16]
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r2, r1, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: lsr.w r12, r0, r2
+; V7M-NEXT: rsb.w r0, r2, #32
+; V7M-NEXT: lsl.w r0, r1, r0
+; V7M-NEXT: orr.w r12, r12, r0
+; V7M-NEXT: subs.w r0, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r12, r1, r0
+; V7M-NEXT: rsb.w r0, r3, #64
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: uxtb r0, r0
+; V7M-NEXT: subs.w lr, r0, #32
+; V7M-NEXT: lsr.w r2, r3, r0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: and.w r0, r3, r12
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_c1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: uxtb r12, r2
+; V7A-NEXT: lsr lr, r0, r12
+; V7A-NEXT: rsb r0, r12, #32
+; V7A-NEXT: orr r4, lr, r1, lsl r0
+; V7A-NEXT: mvn lr, #31
+; V7A-NEXT: uxtab r2, lr, r2
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: lsrpl r4, r1, r2
+; V7A-NEXT: rsb r2, r3, #64
+; V7A-NEXT: lsr r1, r1, r12
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: uxtb r12, r2
+; V7A-NEXT: uxtab r2, lr, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lsr r0, r3, r12
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: and r1, r0, r1
+; V7A-NEXT: lsrpl r3, r3, r2
+; V7A-NEXT: and r0, r3, r4
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_c1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: uxtb.w r12, r2
+; V7A-T-NEXT: lsr.w lr, r0, r12
+; V7A-T-NEXT: rsb.w r0, r12, #32
+; V7A-T-NEXT: lsl.w r0, r1, r0
+; V7A-T-NEXT: orr.w r4, lr, r0
+; V7A-T-NEXT: mvn lr, #31
+; V7A-T-NEXT: uxtab r2, lr, r2
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r4, r1, r2
+; V7A-T-NEXT: rsb.w r2, r3, #64
+; V7A-T-NEXT: lsr.w r1, r1, r12
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: uxtb.w r12, r2
+; V7A-T-NEXT: uxtab r2, lr, r2
+; V7A-T-NEXT: lsr.w r0, r3, r12
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: and.w r1, r1, r0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r2
+; V7A-T-NEXT: and.w r0, r3, r4
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_c1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r3
+; V6M-NEXT: uxtb r2, r2
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r0, r0, r5
+; V6M-NEXT: uxtb r2, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r6
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: ldr.w r12, [sp]
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: orrs r0, r1
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r1
+; V7M-NEXT: lsr.w r1, r3, r2
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsbs.w r12, r12, #32
+; V7M-NEXT: lsr.w r3, r2, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r2, r2, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_c2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r8, lr}
+; V7A-NEXT: push {r4, r6, r8, lr}
+; V7A-NEXT: ldr r12, [sp, #16]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: rsb r6, r12, #64
+; V7A-NEXT: ldr r8, [r0]
+; V7A-NEXT: mvn r0, #0
+; V7A-NEXT: rsbs r1, r12, #32
+; V7A-NEXT: lsr r6, r0, r6
+; V7A-NEXT: lsr r4, r3, r2
+; V7A-NEXT: lsrpl r0, r0, r1
+; V7A-NEXT: movwpl r6, #0
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: and r1, r6, r4
+; V7A-NEXT: lsr r6, r8, r2
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: orr r2, r6, r3, lsl r2
+; V7A-NEXT: lsrpl r2, r3, r12
+; V7A-NEXT: and r0, r0, r2
+; V7A-NEXT: pop {r4, r6, r8, pc}
+;
+; V7A-T-LABEL: bextr64_c2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldrd r0, r3, [r0]
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: lsl.w r1, r3, r1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: orrs r0, r1
+; V7A-T-NEXT: subs.w r1, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r3, r1
+; V7A-T-NEXT: lsr.w r1, r3, r2
+; V7A-T-NEXT: rsb.w r2, r12, #64
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsbs.w r12, r12, #32
+; V7A-T-NEXT: lsr.w r2, r3, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r3, r3, r12
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: ands r0, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_c2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: ldr r0, [sp, #16]
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r2, r1, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsr.w r12, r0, r1
+; V7M-NEXT: rsb.w r0, r1, #32
+; V7M-NEXT: lsl.w r0, r3, r0
+; V7M-NEXT: orr.w r12, r12, r0
+; V7M-NEXT: subs.w r0, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r12, r3, r0
+; V7M-NEXT: rsb.w r0, r2, #64
+; V7M-NEXT: lsr.w r1, r3, r1
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: uxtb r0, r0
+; V7M-NEXT: subs.w lr, r0, #32
+; V7M-NEXT: lsr.w r2, r3, r0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: and.w r0, r3, r12
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bextr64_c3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r4, [r0]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: uxtb r0, r1
+; V7A-NEXT: lsr r12, r4, r0
+; V7A-NEXT: rsb r4, r0, #32
+; V7A-NEXT: lsr r0, r3, r0
+; V7A-NEXT: orr lr, r12, r3, lsl r4
+; V7A-NEXT: mvn r12, #31
+; V7A-NEXT: uxtab r1, r12, r1
+; V7A-NEXT: cmp r1, #0
+; V7A-NEXT: lsrpl lr, r3, r1
+; V7A-NEXT: rsb r1, r2, #64
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: uxtb r2, r1
+; V7A-NEXT: uxtab r4, r12, r1
+; V7A-NEXT: lsr r2, r3, r2
+; V7A-NEXT: cmp r4, #0
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r1, r2, r0
+; V7A-NEXT: lsrpl r3, r3, r4
+; V7A-NEXT: and r0, r3, lr
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_c3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r7, lr}
+; V7A-T-NEXT: push {r4, r5, r7, lr}
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: uxtb r0, r1
+; V7A-T-NEXT: rsb.w r3, r0, #32
+; V7A-T-NEXT: lsl.w r4, lr, r3
+; V7A-T-NEXT: lsr.w r3, r12, r0
+; V7A-T-NEXT: orr.w r5, r3, r4
+; V7A-T-NEXT: mvn r12, #31
+; V7A-T-NEXT: uxtab r1, r12, r1
+; V7A-T-NEXT: lsr.w r0, lr, r0
+; V7A-T-NEXT: cmp r1, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r5, lr, r1
+; V7A-T-NEXT: rsb.w r1, r2, #64
+; V7A-T-NEXT: mov.w r4, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: uxtb r2, r1
+; V7A-T-NEXT: uxtab r3, r12, r1
+; V7A-T-NEXT: lsr.w r2, r4, r2
+; V7A-T-NEXT: cmp r3, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: and.w r1, r2, r0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r4, r3
+; V7A-T-NEXT: and.w r0, r4, r5
+; V7A-T-NEXT: pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_c3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r2
+; V6M-NEXT: ldr r4, [r0]
+; V6M-NEXT: ldr r3, [r0, #4]
+; V6M-NEXT: uxtb r2, r1
+; V6M-NEXT: mov r0, r4
+; V6M-NEXT: mov r1, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r6, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r0, r0, r5
+; V6M-NEXT: uxtb r2, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r6
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %val = load i64, ptr %w
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %shifted
+ ret i64 %masked
+}
+
+define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_c4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: ldr.w r12, [sp]
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsr.w r3, r2, r3
+; V7M-NEXT: rsbs.w r12, r12, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r2, r2, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_c4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr r12, [sp, #16]
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r5, r1, r2
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r4, r12, #64
+; V7A-NEXT: rsbs lr, r12, #32
+; V7A-NEXT: lsr r4, r3, r4
+; V7A-NEXT: lsrpl r3, r3, lr
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: movwpl r5, #0
+; V7A-NEXT: and r12, r5, r4
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: mov r1, r12
+; V7A-NEXT: and r0, r0, r3
+; V7A-NEXT: pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_c4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: mov.w lr, #-1
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orrs r0, r3
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r3
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsbs.w r2, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r2
+; V7A-T-NEXT: rsb.w r2, r12, #64
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: lsr.w r2, lr, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bextr64_c4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: ldr r0, [sp, #16]
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r2, r1, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %shifted, %mask ; swapped order
+ ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
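+; Illustrative C form (a sketch, not FileCheck input):
+;   (uint32_t)((val >> numskipbits) & (~0ull >> (64 - numlowbits)))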
+define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: rsbs.w r1, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl r2, r1
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r3, [sp]
+; V7A-NEXT: rsbs r12, r3, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsrpl r3, r3, r12
+; V7A-NEXT: lsr r12, r0, r2
+; V7A-NEXT: rsb r0, r2, #32
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r12, r1, lsl r0
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: rsbs.w r1, r12, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r2, r1
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: ldr r0, [sp, #8]
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r2, r1, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %shifted
+ %res = trunc i64 %masked to i32
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
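+; Illustrative C form (a sketch, not FileCheck input):
+;   (uint32_t)(val >> numskipbits) & (~0u >> (32 - numlowbits))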
+define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_c1:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_c1:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: rsb r1, r12, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_c1:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: rsb.w r1, r12, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_c1:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %truncshifted = trunc i64 %shifted to i32
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %truncshifted
+ ret i32 %masked
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
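+; Illustrative C form (a sketch, not FileCheck input):
+;   (uint32_t)((val >> numskipbits) & (uint64_t)(~0u >> (32 - numlowbits)))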
+define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_c2:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_c2:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: rsb r1, r12, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_c2:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: rsb.w r1, r12, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_c2:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %zextmask = zext i32 %mask to i64
+ %masked = and i64 %zextmask, %shifted
+ %truncmasked = trunc i64 %masked to i32
+ ret i32 %truncmasked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern d. 32-bit.
+; ---------------------------------------------------------------------------- ;
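+; Pattern d clears the high bits with a shift-left/shift-right pair instead of
+; an explicit mask. In C terms, an illustrative sketch (assuming
+; 0 < numlowbits <= 32, and not part of the FileCheck output):
+;   uint32_t bextr32_d(uint32_t val, uint32_t numskipbits, uint32_t numlowbits) {
+;     return ((val >> numskipbits) << (32 - numlowbits)) >> (32 - numlowbits);
+;   }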
+
+define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %shifted, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_d1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_d1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_d1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_d1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #32
+; V6M-NEXT: subs r1, r1, r2
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %highbitscleared = shl i32 %shifted, %sh_prom
+ %masked = lshr i32 %highbitscleared, %sh_prom
+ ret i32 %masked
+}
+
+define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_d2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_d2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_d2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_d2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r3, #32
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r2
+; V6M-NEXT: lsrs r0, r2
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %shifted = lshr i32 %val, %numskipbits
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %shifted, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr32_d3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr32_d3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: rsb r1, r2, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr32_d3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr32_d3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: movs r1, #32
+; V6M-NEXT: subs r1, r1, r2
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %skip = zext i8 %numskipbits to i32
+ %shifted = lshr i32 %val, %skip
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %highbitscleared = shl i32 %shifted, %sh_prom
+ %masked = lshr i32 %highbitscleared, %sh_prom
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsb.w lr, r12, #32
+; V7M-NEXT: rsb.w r12, r3, #32
+; V7M-NEXT: lsls r1, r3
+; V7M-NEXT: cmp.w lr, #0
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orr.w r1, r1, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, lr
+; V7M-NEXT: lsl.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r2, r1, r12
+; V7M-NEXT: lsr.w r0, r0, r3
+; V7M-NEXT: orr.w r0, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, lr
+; V7M-NEXT: lsr.w r1, r1, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: lsr r3, r1, r2
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: rsb r1, r12, #64
+; V7A-NEXT: rsb lr, r1, #32
+; V7A-NEXT: lsr r2, r0, lr
+; V7A-NEXT: orr r2, r2, r3, lsl r1
+; V7A-NEXT: rsbs r3, r12, #32
+; V7A-NEXT: lslpl r2, r0, r3
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: lsr r1, r2, r1
+; V7A-NEXT: orr r0, r0, r2, lsl lr
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lsrpl r0, r2, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orrs r0, r3
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r3
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: rsb.w r3, r12, #64
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsls r1, r3
+; V7A-T-NEXT: rsbs.w r2, r12, #32
+; V7A-T-NEXT: lsr.w r4, r0, lr
+; V7A-T-NEXT: orr.w r1, r1, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: lsr.w r1, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r2, [sp, #8]
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r4, r3, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %shifted, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ ret i64 %masked
+}
+
+define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_d1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: uxtb.w lr, r2
+; V7M-NEXT: subs.w r2, lr, #32
+; V7M-NEXT: lsr.w r12, r0, lr
+; V7M-NEXT: rsb.w r0, lr, #32
+; V7M-NEXT: lsl.w r0, r1, r0
+; V7M-NEXT: orr.w r0, r0, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: rsb.w r2, r3, #64
+; V7M-NEXT: lsr.w r1, r1, lr
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsb.w r12, r2, #32
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: sub.w r3, r2, #32
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orrs r1, r4
+; V7M-NEXT: cmp r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, r3
+; V7M-NEXT: lsl.w r0, r0, r2
+; V7M-NEXT: lsl.w r4, r1, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsr.w r0, r0, r2
+; V7M-NEXT: orr.w r0, r0, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: uxtb r12, r2
+; V7A-NEXT: lsr lr, r0, r12
+; V7A-NEXT: rsb r0, r12, #32
+; V7A-NEXT: orr r0, lr, r1, lsl r0
+; V7A-NEXT: mvn lr, #31
+; V7A-NEXT: uxtab r2, lr, r2
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: rsb r2, r3, #64
+; V7A-NEXT: lsr r1, r1, r12
+; V7A-NEXT: uxtb r3, r2
+; V7A-NEXT: rsb r4, r3, #32
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: uxtab r2, lr, r2
+; V7A-NEXT: lsr r5, r0, r4
+; V7A-NEXT: orr r1, r5, r1, lsl r3
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: lslpl r1, r0, r2
+; V7A-NEXT: lsl r0, r0, r3
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r3
+; V7A-NEXT: orr r0, r0, r1, lsl r4
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: lsr r1, r1, r3
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_d1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r6, r7, lr}
+; V7A-T-NEXT: push {r4, r5, r6, r7, lr}
+; V7A-T-NEXT: uxtb.w r12, r2
+; V7A-T-NEXT: rsb.w r6, r12, #32
+; V7A-T-NEXT: rsb.w r3, r3, #64
+; V7A-T-NEXT: lsr.w r0, r0, r12
+; V7A-T-NEXT: mvn r7, #31
+; V7A-T-NEXT: uxtab r2, r7, r2
+; V7A-T-NEXT: lsl.w r6, r1, r6
+; V7A-T-NEXT: lsr.w lr, r1, r12
+; V7A-T-NEXT: orrs r0, r6
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w lr, #0
+; V7A-T-NEXT: uxtb r5, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: rsb.w r1, r5, #32
+; V7A-T-NEXT: uxtab r3, r7, r3
+; V7A-T-NEXT: lsl.w r4, lr, r5
+; V7A-T-NEXT: lsr.w r2, r0, r1
+; V7A-T-NEXT: cmp r3, #0
+; V7A-T-NEXT: orr.w r2, r2, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r2, r0, r3
+; V7A-T-NEXT: lsl.w r0, r0, r5
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: lsr.w r0, r0, r5
+; V7A-T-NEXT: orr.w r0, r0, r1
+; V7A-T-NEXT: lsr.w r1, r2, r5
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, r5, r6, r7, pc}
+;
+; V6M-LABEL: bextr64_d1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r4, r3
+; V6M-NEXT: uxtb r2, r2
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: movs r2, #64
+; V6M-NEXT: subs r2, r2, r4
+; V6M-NEXT: uxtb r4, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %highbitscleared = shl i64 %shifted, %sh_prom
+ %masked = lshr i64 %highbitscleared, %sh_prom
+ ret i64 %masked
+}
+
+define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_d2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: rsb.w lr, r12, #32
+; V7M-NEXT: orrs r0, r1
+; V7M-NEXT: subs.w r1, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r1
+; V7M-NEXT: rsb.w r1, r12, #64
+; V7M-NEXT: lsr.w r2, r3, r2
+; V7M-NEXT: rsb.w r12, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: cmp.w lr, #0
+; V7M-NEXT: lsl.w r2, r2, r1
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orr.w r2, r2, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r0, lr
+; V7M-NEXT: lsl.w r0, r0, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r3, r2, r12
+; V7M-NEXT: lsr.w r0, r0, r1
+; V7M-NEXT: lsr.w r1, r2, r1
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r2, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: ldrd r0, r1, [r0]
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: lsr r3, r1, r2
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: rsb r1, r12, #64
+; V7A-NEXT: rsb lr, r1, #32
+; V7A-NEXT: lsr r2, r0, lr
+; V7A-NEXT: orr r2, r2, r3, lsl r1
+; V7A-NEXT: rsbs r3, r12, #32
+; V7A-NEXT: lslpl r2, r0, r3
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: lsr r1, r2, r1
+; V7A-NEXT: orr r0, r0, r2, lsl lr
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lsrpl r0, r2, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_d2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: ldrd r0, r3, [r0]
+; V7A-T-NEXT: rsb.w r1, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: lsl.w r1, r3, r1
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: orrs r0, r1
+; V7A-T-NEXT: subs.w r1, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r3, r1
+; V7A-T-NEXT: lsr.w r2, r3, r2
+; V7A-T-NEXT: rsb.w r1, r12, #64
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: rsb.w lr, r1, #32
+; V7A-T-NEXT: rsbs.w r3, r12, #32
+; V7A-T-NEXT: lsl.w r2, r2, r1
+; V7A-T-NEXT: lsr.w r4, r0, lr
+; V7A-T-NEXT: orr.w r2, r2, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r2, r0, r3
+; V7A-T-NEXT: lsl.w r0, r0, r1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r2, lr
+; V7A-T-NEXT: lsr.w r0, r0, r1
+; V7A-T-NEXT: lsr.w r1, r2, r1
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_d2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: ldr r3, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r2, [sp, #8]
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r4, r3, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %shifted, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ ret i64 %masked
+}
+
+define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_d3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: ldrd r0, lr, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: subs.w r3, r1, #32
+; V7M-NEXT: lsr.w r12, r0, r1
+; V7M-NEXT: rsb.w r0, r1, #32
+; V7M-NEXT: lsr.w r1, lr, r1
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: lsl.w r0, lr, r0
+; V7M-NEXT: orr.w r0, r0, r12
+; V7M-NEXT: rsb.w r12, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, lr, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: lsls r1, r2
+; V7M-NEXT: sub.w r3, r2, #32
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orrs r1, r4
+; V7M-NEXT: cmp r3, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, r3
+; V7M-NEXT: lsl.w r0, r0, r2
+; V7M-NEXT: lsl.w r4, r1, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsr.w r0, r0, r2
+; V7M-NEXT: orr.w r0, r0, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r5, r11, lr}
+; V7A-NEXT: push {r4, r5, r11, lr}
+; V7A-NEXT: ldr r4, [r0]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: uxtb r0, r1
+; V7A-NEXT: lsr r12, r4, r0
+; V7A-NEXT: rsb r4, r0, #32
+; V7A-NEXT: lsr r0, r3, r0
+; V7A-NEXT: orr r4, r12, r3, lsl r4
+; V7A-NEXT: mvn r12, #31
+; V7A-NEXT: uxtab r1, r12, r1
+; V7A-NEXT: cmp r1, #0
+; V7A-NEXT: lsrpl r4, r3, r1
+; V7A-NEXT: rsb r1, r2, #64
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: uxtb r2, r1
+; V7A-NEXT: rsb lr, r2, #32
+; V7A-NEXT: uxtab r1, r12, r1
+; V7A-NEXT: lsr r5, r4, lr
+; V7A-NEXT: orr r3, r5, r0, lsl r2
+; V7A-NEXT: cmp r1, #0
+; V7A-NEXT: lsl r0, r4, r2
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lslpl r3, r4, r1
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: orr r0, r0, r3, lsl lr
+; V7A-NEXT: lsrpl r0, r3, r1
+; V7A-NEXT: lsr r1, r3, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_d3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, r5, r6, lr}
+; V7A-T-NEXT: push {r4, r5, r6, lr}
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: uxtb r0, r1
+; V7A-T-NEXT: rsb.w r6, r0, #32
+; V7A-T-NEXT: lsr.w r3, lr, r0
+; V7A-T-NEXT: rsb.w r2, r2, #64
+; V7A-T-NEXT: mvn r4, #31
+; V7A-T-NEXT: lsr.w r0, r12, r0
+; V7A-T-NEXT: uxtab r1, r4, r1
+; V7A-T-NEXT: lsl.w r6, lr, r6
+; V7A-T-NEXT: orrs r0, r6
+; V7A-T-NEXT: cmp r1, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: uxtb r5, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, lr, r1
+; V7A-T-NEXT: rsb.w r1, r5, #32
+; V7A-T-NEXT: lsls r3, r5
+; V7A-T-NEXT: uxtab r2, r4, r2
+; V7A-T-NEXT: lsr.w r6, r0, r1
+; V7A-T-NEXT: orrs r3, r6
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r5
+; V7A-T-NEXT: lsl.w r1, r3, r1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsr.w r0, r0, r5
+; V7A-T-NEXT: orr.w r0, r0, r1
+; V7A-T-NEXT: lsr.w r1, r3, r5
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r3, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, r5, r6, pc}
+;
+; V6M-LABEL: bextr64_d3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r2
+; V6M-NEXT: ldr r5, [r0]
+; V6M-NEXT: ldr r3, [r0, #4]
+; V6M-NEXT: uxtb r2, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r3
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: movs r2, #64
+; V6M-NEXT: subs r2, r2, r4
+; V6M-NEXT: uxtb r4, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %skip = zext i8 %numskipbits to i64
+ %shifted = lshr i64 %val, %skip
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %highbitscleared = shl i64 %shifted, %sh_prom
+ %masked = lshr i64 %highbitscleared, %sh_prom
+ ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything is done in 64-bit; truncation happens last.
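+; The mask is thus applied to the full 64-bit value, so the complete 64-bit
+; shift-and-mask sequence is emitted and only the low word is returned.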
+define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r4, lr}
+; V7M-NEXT: push {r4, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: ldr.w r12, [sp, #8]
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orrs r0, r3
+; V7M-NEXT: subs.w r3, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r3
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: rsb.w r3, r12, #64
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: rsb.w lr, r12, #32
+; V7M-NEXT: rsb.w r12, r3, #32
+; V7M-NEXT: lsls r1, r3
+; V7M-NEXT: cmp.w lr, #0
+; V7M-NEXT: lsr.w r4, r0, r12
+; V7M-NEXT: orr.w r1, r1, r4
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, lr
+; V7M-NEXT: lsl.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r2, r1, r12
+; V7M-NEXT: lsr.w r0, r0, r3
+; V7M-NEXT: orr.w r0, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, lr
+; V7M-NEXT: pop {r4, pc}
+;
+; V7A-LABEL: bextr64_32_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: lsr r3, r1, r2
+; V7A-NEXT: subs lr, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: rsb r2, r2, #32
+; V7A-NEXT: ldr r12, [sp, #8]
+; V7A-NEXT: movwpl r3, #0
+; V7A-NEXT: orr r0, r0, r1, lsl r2
+; V7A-NEXT: lsrpl r0, r1, lr
+; V7A-NEXT: rsb r1, r12, #64
+; V7A-NEXT: rsb lr, r1, #32
+; V7A-NEXT: lsr r2, r0, lr
+; V7A-NEXT: orr r2, r2, r3, lsl r1
+; V7A-NEXT: rsbs r3, r12, #32
+; V7A-NEXT: lslpl r2, r0, r3
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: orr r0, r0, r2, lsl lr
+; V7A-NEXT: lsrpl r0, r2, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: ldr.w r12, [sp, #8]
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orrs r0, r3
+; V7A-T-NEXT: subs.w r3, r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r3
+; V7A-T-NEXT: lsr.w r1, r1, r2
+; V7A-T-NEXT: rsb.w r3, r12, #64
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsls r1, r3
+; V7A-T-NEXT: rsbs.w r2, r12, #32
+; V7A-T-NEXT: lsr.w r4, r0, lr
+; V7A-T-NEXT: orr.w r1, r1, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bextr64_32_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r2, [sp, #8]
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r4, r3, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %shifted, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ %res = trunc i64 %masked to i32
+ ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
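+; Since the mask is applied after the truncation, only a 32-bit shl/lshr
+; pair is needed, so the code below is much shorter than bextr64_32_d0.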
+define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; V7M-LABEL: bextr64_32_d1:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsrs r0, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: ldr r1, [sp]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bextr64_32_d1:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: ldr r12, [sp]
+; V7A-NEXT: subs r2, r2, #32
+; V7A-NEXT: orr r0, r0, r1, lsl r3
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: rsb r1, r12, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bextr64_32_d1:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: lsrs r0, r2
+; V7A-T-NEXT: ldr.w r12, [sp]
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: lsl.w r3, r1, r3
+; V7A-T-NEXT: orr.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: rsb.w r1, r12, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bextr64_32_d1:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r7, lr}
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldr r1, [sp, #8]
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: pop {r7, pc}
+ %shifted = lshr i64 %val, %numskipbits
+ %truncshifted = trunc i64 %shifted to i32
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %truncshifted, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant
+; ---------------------------------------------------------------------------- ;
+
+; https://bugs.llvm.org/show_bug.cgi?id=38938
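+; The shift and mask feeding the GEP index should fold into a single ubfx
+; (or a shift pair on v6-M), as the checks below verify.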
+define void @pr38938(ptr %a0, ptr %a1) nounwind {
+; V7M-LABEL: pr38938:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r1, [r1]
+; V7M-NEXT: ubfx r1, r1, #21, #10
+; V7M-NEXT: ldr.w r2, [r0, r1, lsl #2]
+; V7M-NEXT: adds r2, #1
+; V7M-NEXT: str.w r2, [r0, r1, lsl #2]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: pr38938:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r1, [r1]
+; V7A-NEXT: ubfx r1, r1, #21, #10
+; V7A-NEXT: ldr r2, [r0, r1, lsl #2]
+; V7A-NEXT: add r2, r2, #1
+; V7A-NEXT: str r2, [r0, r1, lsl #2]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: pr38938:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r1, [r1]
+; V7A-T-NEXT: ubfx r1, r1, #21, #10
+; V7A-T-NEXT: ldr.w r2, [r0, r1, lsl #2]
+; V7A-T-NEXT: adds r2, #1
+; V7A-T-NEXT: str.w r2, [r0, r1, lsl #2]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: pr38938:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r1]
+; V6M-NEXT: lsrs r1, r1, #19
+; V6M-NEXT: ldr r2, .LCPI51_0
+; V6M-NEXT: ands r2, r1
+; V6M-NEXT: ldr r1, [r0, r2]
+; V6M-NEXT: adds r1, r1, #1
+; V6M-NEXT: str r1, [r0, r2]
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI51_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp = load i64, ptr %a1, align 8
+ %tmp1 = lshr i64 %tmp, 21
+ %tmp2 = and i64 %tmp1, 1023
+ %tmp3 = getelementptr inbounds i32, ptr %a0, i64 %tmp2
+ %tmp4 = load i32, ptr %tmp3, align 4
+ %tmp5 = add nsw i32 %tmp4, 1
+ store i32 %tmp5, ptr %tmp3, align 4
+ ret void
+}
+
+; The most canonical variant
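+; lshr by 19 followed by an AND with 1023 (ten ones) selects a 10-bit field
+; starting at bit 19, which is exactly ubfx #19, #10.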
+define i32 @c0_i32(i32 %arg) nounwind {
+; V7M-LABEL: c0_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: ubfx r0, r0, #19, #10
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c0_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: ubfx r0, r0, #19, #10
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c0_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ubfx r0, r0, #19, #10
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c0_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsls r0, r0, #3
+; V6M-NEXT: lsrs r0, r0, #22
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 1023
+ ret i32 %tmp1
+}
+
+; Should still be fine, but the mask is shifted
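+; Here 4092 = 1023 << 2, so the field no longer starts at bit 0 and a plain
+; ubfx cannot extract it; a shifted-mask AND is expected instead.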
+define i32 @c1_i32(i32 %arg) nounwind {
+; V7M-LABEL: c1_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r1, #4092
+; V7M-NEXT: and.w r0, r1, r0, lsr #19
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c1_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r1, #4092
+; V7A-NEXT: and r0, r1, r0, lsr #19
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c1_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r1, #4092
+; V7A-T-NEXT: and.w r0, r1, r0, lsr #19
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c1_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r1, r0, #19
+; V6M-NEXT: ldr r0, .LCPI53_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI53_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 4092
+ ret i32 %tmp1
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define i32 @c2_i32(i32 %arg) nounwind {
+; V7M-LABEL: c2_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r1, #4092
+; V7M-NEXT: and.w r0, r1, r0, lsr #17
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c2_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r1, #4092
+; V7A-NEXT: and r0, r1, r0, lsr #17
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c2_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r1, #4092
+; V7A-T-NEXT: and.w r0, r1, r0, lsr #17
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c2_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r1, r0, #17
+; V6M-NEXT: ldr r0, .LCPI54_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI54_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 1023
+ %tmp2 = shl i32 %tmp1, 2
+ ret i32 %tmp2
+}
+
+; The mask covers a newly shifted-in bit
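+; 16382 = 0x3ffe drops bit 0 and reaches bit 13, one past the 13 bits that
+; survive the shift, so this is not a contiguous low-bits extract and the
+; bit-field-extract fold must not fire.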
+define i32 @c4_i32_bad(i32 %arg) nounwind {
+; V7M-LABEL: c4_i32_bad:
+; V7M: @ %bb.0:
+; V7M-NEXT: mvn r1, #1
+; V7M-NEXT: and.w r0, r1, r0, lsr #19
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c4_i32_bad:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r1, #1
+; V7A-NEXT: and r0, r1, r0, lsr #19
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c4_i32_bad:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mvn r1, #1
+; V7A-T-NEXT: and.w r0, r1, r0, lsr #19
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c4_i32_bad:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r0, #20
+; V6M-NEXT: lsls r0, r0, #1
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 16382
+ ret i32 %tmp1
+}
+
+; i64
+
+; The most canonical variant
+define i64 @c0_i64(i64 %arg) nounwind {
+; V7M-LABEL: c0_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: ubfx r0, r1, #19, #10
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c0_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: ubfx r0, r1, #19, #10
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c0_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ubfx r0, r1, #19, #10
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c0_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsls r0, r1, #3
+; V6M-NEXT: lsrs r0, r0, #22
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 1023
+ ret i64 %tmp1
+}
+
+; Should still be fine, but the mask is shifted
+define i64 @c1_i64(i64 %arg) nounwind {
+; V7M-LABEL: c1_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r0, #4092
+; V7M-NEXT: and.w r0, r0, r1, lsr #19
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c1_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r0, #4092
+; V7A-NEXT: and r0, r0, r1, lsr #19
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c1_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r0, #4092
+; V7A-T-NEXT: and.w r0, r0, r1, lsr #19
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c1_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r1, r1, #19
+; V6M-NEXT: ldr r0, .LCPI57_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI57_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 4092
+ ret i64 %tmp1
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define i64 @c2_i64(i64 %arg) nounwind {
+; V7M-LABEL: c2_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r0, #4092
+; V7M-NEXT: and.w r0, r0, r1, lsr #17
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c2_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r0, #4092
+; V7A-NEXT: and r0, r0, r1, lsr #17
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c2_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r0, #4092
+; V7A-T-NEXT: and.w r0, r0, r1, lsr #17
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c2_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r1, r1, #17
+; V6M-NEXT: ldr r0, .LCPI58_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI58_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 1023
+ %tmp2 = shl i64 %tmp1, 2
+ ret i64 %tmp2
+}
+
+; The mask covers a newly shifted-in bit
+define i64 @c4_i64_bad(i64 %arg) nounwind {
+; V7M-LABEL: c4_i64_bad:
+; V7M: @ %bb.0:
+; V7M-NEXT: mvn r0, #1
+; V7M-NEXT: and.w r0, r0, r1, lsr #19
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c4_i64_bad:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r0, #1
+; V7A-NEXT: and r0, r0, r1, lsr #19
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c4_i64_bad:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mvn r0, #1
+; V7A-T-NEXT: and.w r0, r0, r1, lsr #19
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c4_i64_bad:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r1, #20
+; V6M-NEXT: lsls r0, r0, #1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 16382
+ ret i64 %tmp1
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant, storing the result afterwards.
+; ---------------------------------------------------------------------------- ;
+
+; i32
+
+; The most canonical variant
+define void @c5_i32(i32 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c5_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: ubfx r0, r0, #19, #10
+; V7M-NEXT: str r0, [r1]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c5_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: ubfx r0, r0, #19, #10
+; V7A-NEXT: str r0, [r1]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c5_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ubfx r0, r0, #19, #10
+; V7A-T-NEXT: str r0, [r1]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c5_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsls r0, r0, #3
+; V6M-NEXT: lsrs r0, r0, #22
+; V6M-NEXT: str r0, [r1]
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 1023
+ store i32 %tmp1, ptr %ptr
+ ret void
+}
+
+; Should still be fine, but the mask is shifted
+define void @c6_i32(i32 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c6_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: ubfx r0, r0, #19, #12
+; V7M-NEXT: str r0, [r1]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c6_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: ubfx r0, r0, #19, #12
+; V7A-NEXT: str r0, [r1]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c6_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ubfx r0, r0, #19, #12
+; V7A-T-NEXT: str r0, [r1]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c6_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsls r0, r0, #1
+; V6M-NEXT: lsrs r0, r0, #20
+; V6M-NEXT: str r0, [r1]
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 4095
+ store i32 %tmp1, ptr %ptr
+ ret void
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define void @c7_i32(i32 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c7_i32:
+; V7M: @ %bb.0:
+; V7M-NEXT: movw r2, #4092
+; V7M-NEXT: and.w r0, r2, r0, lsr #17
+; V7M-NEXT: str r0, [r1]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c7_i32:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r2, #4092
+; V7A-NEXT: and r0, r2, r0, lsr #17
+; V7A-NEXT: str r0, [r1]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c7_i32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movw r2, #4092
+; V7A-T-NEXT: and.w r0, r2, r0, lsr #17
+; V7A-T-NEXT: str r0, [r1]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c7_i32:
+; V6M: @ %bb.0:
+; V6M-NEXT: lsrs r0, r0, #17
+; V6M-NEXT: ldr r2, .LCPI62_0
+; V6M-NEXT: ands r2, r0
+; V6M-NEXT: str r2, [r1]
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI62_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i32 %arg, 19
+ %tmp1 = and i32 %tmp0, 1023
+ %tmp2 = shl i32 %tmp1, 2
+ store i32 %tmp2, ptr %ptr
+ ret void
+}
+
+; i64
+
+; The most canonical variant
+define void @c5_i64(i64 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c5_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r0, #0
+; V7M-NEXT: ubfx r1, r1, #19, #10
+; V7M-NEXT: strd r1, r0, [r2]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c5_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r0, #0
+; V7A-NEXT: str r0, [r2, #4]
+; V7A-NEXT: ubfx r0, r1, #19, #10
+; V7A-NEXT: str r0, [r2]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c5_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r0, #0
+; V7A-T-NEXT: ubfx r1, r1, #19, #10
+; V7A-T-NEXT: strd r1, r0, [r2]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c5_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: lsls r1, r1, #3
+; V6M-NEXT: lsrs r1, r1, #22
+; V6M-NEXT: str r1, [r2]
+; V6M-NEXT: str r0, [r2, #4]
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 1023
+ store i64 %tmp1, ptr %ptr
+ ret void
+}
+
+; Should still be fine, but the mask is shifted
+define void @c6_i64(i64 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c6_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r0, #0
+; V7M-NEXT: ubfx r1, r1, #19, #12
+; V7M-NEXT: strd r1, r0, [r2]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c6_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r0, #0
+; V7A-NEXT: str r0, [r2, #4]
+; V7A-NEXT: ubfx r0, r1, #19, #12
+; V7A-NEXT: str r0, [r2]
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c6_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r0, #0
+; V7A-T-NEXT: ubfx r1, r1, #19, #12
+; V7A-T-NEXT: strd r1, r0, [r2]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c6_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: lsls r1, r1, #1
+; V6M-NEXT: lsrs r1, r1, #20
+; V6M-NEXT: str r1, [r2]
+; V6M-NEXT: str r0, [r2, #4]
+; V6M-NEXT: bx lr
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 4095
+ store i64 %tmp1, ptr %ptr
+ ret void
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define void @c7_i64(i64 %arg, ptr %ptr) nounwind {
+; V7M-LABEL: c7_i64:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r0, #0
+; V7M-NEXT: movw r3, #4092
+; V7M-NEXT: and.w r1, r3, r1, lsr #17
+; V7M-NEXT: strd r1, r0, [r2]
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: c7_i64:
+; V7A: @ %bb.0:
+; V7A-NEXT: movw r0, #4092
+; V7A-NEXT: mov r3, #0
+; V7A-NEXT: and r0, r0, r1, lsr #17
+; V7A-NEXT: stm r2, {r0, r3}
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: c7_i64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r0, #0
+; V7A-T-NEXT: movw r3, #4092
+; V7A-T-NEXT: and.w r1, r3, r1, lsr #17
+; V7A-T-NEXT: strd r1, r0, [r2]
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: c7_i64:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: lsrs r1, r1, #17
+; V6M-NEXT: ldr r3, .LCPI65_0
+; V6M-NEXT: ands r3, r1
+; V6M-NEXT: str r3, [r2]
+; V6M-NEXT: str r0, [r2, #4]
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI65_0:
+; V6M-NEXT: .long 4092 @ 0xffc
+ %tmp0 = lshr i64 %arg, 51
+ %tmp1 = and i64 %tmp0, 1023
+ %tmp2 = shl i64 %tmp1, 2
+ store i64 %tmp2, ptr %ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/ARM/extract-lowbits.ll b/llvm/test/CodeGen/ARM/extract-lowbits.ll
new file mode 100644
index 0000000..b483793
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/extract-lowbits.ll
@@ -0,0 +1,2752 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M
+
+; Patterns:
+; a) x & (1 << nbits) - 1
+; b) x & ~(-1 << nbits)
+; c) x & (-1 >> (32 - y))
+; d) x << (32 - y) >> (32 - y)
+; are equivalent.
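+; For example, with nbits = 8 every pattern produces the mask 0xff:
+;   a) (1 << 8) - 1   = 0xff
+;   b) ~(-1 << 8)     = ~0xffffff00 = 0xff
+;   c) -1 >> (32 - 8) = 0xffffffff >> 24 = 0xff
+;   d) x << 24 >> 24 keeps only the low 8 bits of x.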
+
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %conv = zext i8 %numlowbits to i32
+ %onebit = shl i32 1, %conv
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_a2_load(ptr %w, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r1, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %conv = zext i8 %numlowbits to i32
+ %onebit = shl i32 1, %conv
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_a4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: movs r2, #1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: subs r1, #1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_a4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: add r1, r3, r2, lsl r1
+; V7A-NEXT: and r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_a4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: subs r1, #1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_a4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #1
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: subs r1, r2, #1
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %onebit = shl i32 1, %numlowbits
+ %mask = add nsw i32 %onebit, -1
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: sbc r3, r3, #0
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: subs r2, r2, #1
+; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: mov.w r12, #1
+; V7A-T-NEXT: subs.w lr, r2, #32
+; V7A-T-NEXT: lsl.w r2, r12, r2
+; V7A-T-NEXT: lsr.w r3, r12, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r12, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs r2, #1
+; V7A-T-NEXT: sbc r3, r3, #0
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r6, #0
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: sbcs r1, r6
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+; Check that we don't throw away the vreg_width-1 mask if not using shifts
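+; The explicit 'and ..., #63' must be kept in the output below, since ARM's
+; 64-bit shift lowering does not implicitly mask the shift amount.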
+define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a0_masked:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: and r2, r2, #63
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: sbc r3, r3, #0
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a0_masked:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: and r2, r2, #63
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: subs r2, r2, #1
+; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a0_masked:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: and r2, r2, #63
+; V7A-T-NEXT: mov.w r12, #1
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: subs.w lr, r2, #32
+; V7A-T-NEXT: lsl.w r2, r12, r2
+; V7A-T-NEXT: lsr.w r3, r12, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r12, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs r2, #1
+; V7A-T-NEXT: sbc r3, r3, #0
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a0_masked:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #63
+; V6M-NEXT: ands r2, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r6, #0
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: sbcs r1, r6
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %numlowbits.masked = and i64 %numlowbits, 63
+ %onebit = shl i64 1, %numlowbits.masked
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: sbc r3, r3, #0
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: subs r2, r2, #1
+; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: mov.w r12, #1
+; V7A-T-NEXT: subs.w lr, r2, #32
+; V7A-T-NEXT: lsl.w r2, r12, r2
+; V7A-T-NEXT: lsr.w r3, r12, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r12, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs r2, #1
+; V7A-T-NEXT: sbc r3, r3, #0
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r6, #0
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: sbcs r1, r6
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %conv = zext i8 %numlowbits to i64
+ %onebit = shl i64 1, %conv
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_a2_load(ptr %w, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r2, #32
+; V7M-NEXT: movs r3, #1
+; V7M-NEXT: subs.w r12, r2, #32
+; V7M-NEXT: lsl.w r2, r3, r2
+; V7M-NEXT: lsr.w r1, r3, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r3, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: sbc r1, r1, #0
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_a2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r11, lr}
+; V7A-NEXT: push {r4, r6, r11, lr}
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: mov r1, #1
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: rsb r0, r2, #32
+; V7A-NEXT: subs r4, r2, #32
+; V7A-NEXT: lsr r0, r1, r0
+; V7A-NEXT: lslpl r0, r1, r4
+; V7A-NEXT: lsl r1, r1, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: subs r2, r1, #1
+; V7A-NEXT: sbc r0, r0, #0
+; V7A-NEXT: and r1, r0, r3
+; V7A-NEXT: and r0, r2, r6
+; V7A-NEXT: pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: movs r1, #1
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: subs.w r0, r2, #32
+; V7A-T-NEXT: lsr.w r3, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r1, r0
+; V7A-T-NEXT: lsl.w r0, r1, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs r0, #1
+; V7A-T-NEXT: sbc r1, r3, #0
+; V7A-T-NEXT: and.w r0, r0, r12
+; V7A-T-NEXT: and.w r1, r1, lr
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r5, #0
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r2, r0, #1
+; V6M-NEXT: sbcs r1, r5
+; V6M-NEXT: ldm r4!, {r0, r3}
+; V6M-NEXT: ands r1, r3
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r2, r1, #32
+; V7M-NEXT: movs r3, #1
+; V7M-NEXT: subs.w r12, r1, #32
+; V7M-NEXT: lsl.w r1, r3, r1
+; V7M-NEXT: lsr.w r2, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r3, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: subs r3, r1, #1
+; V7M-NEXT: sbc r1, r2, #0
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_a3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r11, lr}
+; V7A-NEXT: push {r4, r6, r11, lr}
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: mov r2, #1
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: rsb r0, r1, #32
+; V7A-NEXT: subs r4, r1, #32
+; V7A-NEXT: lsl r1, r2, r1
+; V7A-NEXT: lsr r0, r2, r0
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: lslpl r0, r2, r4
+; V7A-NEXT: subs r2, r1, #1
+; V7A-NEXT: sbc r0, r0, #0
+; V7A-NEXT: and r1, r0, r3
+; V7A-NEXT: and r0, r2, r6
+; V7A-NEXT: pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r1, #32
+; V7A-T-NEXT: movs r2, #1
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: subs.w r0, r1, #32
+; V7A-T-NEXT: lsr.w r3, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r2, r0
+; V7A-T-NEXT: lsl.w r0, r2, r1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: subs r0, #1
+; V7A-T-NEXT: sbc r1, r3, #0
+; V7A-T-NEXT: and.w r0, r0, r12
+; V7A-T-NEXT: and.w r1, r1, lr
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r2, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r5, #0
+; V6M-NEXT: mov r1, r5
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r2, r0, #1
+; V6M-NEXT: sbcs r1, r5
+; V6M-NEXT: ldm r4!, {r0, r3}
+; V6M-NEXT: ands r1, r3
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %val = load i64, ptr %w
+ %conv = zext i8 %numlowbits to i64
+ %onebit = shl i64 1, %conv
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_a4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: mov.w r12, #1
+; V7M-NEXT: subs.w lr, r2, #32
+; V7M-NEXT: lsl.w r2, r12, r2
+; V7M-NEXT: lsr.w r3, r12, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r12, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: subs r2, #1
+; V7M-NEXT: sbc r3, r3, #0
+; V7M-NEXT: ands r0, r2
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: mov r12, #1
+; V7A-NEXT: lsr lr, r12, r3
+; V7A-NEXT: subs r3, r2, #32
+; V7A-NEXT: lsl r2, r12, r2
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: lslpl lr, r12, r3
+; V7A-NEXT: subs r2, r2, #1
+; V7A-NEXT: sbc r3, lr, #0
+; V7A-NEXT: and r0, r0, r2
+; V7A-NEXT: and r1, r1, r3
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #32
+; V7A-T-NEXT: mov.w r12, #1
+; V7A-T-NEXT: subs.w lr, r2, #32
+; V7A-T-NEXT: lsl.w r2, r12, r2
+; V7A-T-NEXT: lsr.w r3, r12, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r3, r12, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: subs r2, #1
+; V7A-T-NEXT: sbc r3, r3, #0
+; V7A-T-NEXT: ands r0, r2
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r6, lr}
+; V6M-NEXT: push {r4, r5, r6, lr}
+; V6M-NEXT: mov r5, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #1
+; V6M-NEXT: movs r6, #0
+; V6M-NEXT: mov r1, r6
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: subs r0, r0, #1
+; V6M-NEXT: sbcs r1, r6
+; V6M-NEXT: ands r1, r5
+; V6M-NEXT: ands r0, r4
+; V6M-NEXT: pop {r4, r5, r6, pc}
+ %onebit = shl i64 1, %numlowbits
+ %mask = add nsw i64 %onebit, -1
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern b. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %conv = zext i8 %numlowbits to i32
+ %notmask = shl i32 -1, %conv
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_b2_load(ptr %w, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %conv = zext i8 %numlowbits to i32
+ %notmask = shl i32 -1, %conv
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_b4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: bics r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_b4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: bic r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_b4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: bics r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_b4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: bics r0, r2
+; V6M-NEXT: bx lr
+ %notmask = shl i32 -1, %numlowbits
+ %mask = xor i32 %notmask, -1
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b0:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: bic.w r0, r0, r12
+; V7M-NEXT: bics r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b0:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_b0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: bic.w r0, r0, r12
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r4, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: bic.w r0, r0, r12
+; V7M-NEXT: bics r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_b1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: bic.w r0, r0, r12
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r4, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %conv = zext i8 %numlowbits to i64
+ %notmask = shl i64 -1, %conv
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_b2_load(ptr %w, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r1, #-1
+; V7M-NEXT: subs.w r12, r2, #32
+; V7M-NEXT: lsl.w r3, r1, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r1, r12
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: bic.w r1, r2, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, lr}
+; V7A-NEXT: push {r4, lr}
+; V7A-NEXT: ldr r4, [r0]
+; V7A-NEXT: mvn r1, #0
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: subs r0, r2, #32
+; V7A-NEXT: lsl r2, r1, r2
+; V7A-NEXT: lslpl r1, r1, r0
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r1, r3, r1
+; V7A-NEXT: bic r0, r4, r2
+; V7A-NEXT: pop {r4, pc}
+;
+; V7A-T-LABEL: bzhi64_b2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r1, #-1
+; V7A-T-NEXT: ldrd r0, r12, [r0]
+; V7A-T-NEXT: lsl.w r3, r1, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r1, r2
+; V7A-T-NEXT: bics r0, r3
+; V7A-T-NEXT: bic.w r1, r12, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: ldm r4!, {r2, r3}
+; V6M-NEXT: bics r2, r0
+; V6M-NEXT: bics r3, r1
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: mov r1, r3
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: subs.w r12, r1, #32
+; V7M-NEXT: lsl.w r3, r2, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r3, #0
+; V7M-NEXT: ldrd r0, r1, [r0]
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r2, r12
+; V7M-NEXT: bics r1, r2
+; V7M-NEXT: bics r0, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r11, lr}
+; V7A-NEXT: push {r4, r6, r11, lr}
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: subs r0, r1, #32
+; V7A-NEXT: lsl r4, r2, r1
+; V7A-NEXT: lslpl r2, r2, r0
+; V7A-NEXT: movwpl r4, #0
+; V7A-NEXT: bic r1, r3, r2
+; V7A-NEXT: bic r0, r6, r4
+; V7A-NEXT: pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_b3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: ldrd r0, r12, [r0]
+; V7A-T-NEXT: lsl.w r3, r2, r1
+; V7A-T-NEXT: subs r1, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r3, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r2, r1
+; V7A-T-NEXT: bics r0, r3
+; V7A-T-NEXT: bic.w r1, r12, r2
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r2, r1
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: ldm r4!, {r2, r3}
+; V6M-NEXT: bics r2, r0
+; V6M-NEXT: bics r3, r1
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: mov r1, r3
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %conv = zext i8 %numlowbits to i64
+ %notmask = shl i64 -1, %conv
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_b4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: bic.w r0, r0, r12
+; V7M-NEXT: bics r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_b4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: bic r1, r1, r3
+; V7A-NEXT: bic r0, r0, r2
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_b4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: bic.w r0, r0, r12
+; V7A-T-NEXT: bics r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_b4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: bics r5, r0
+; V6M-NEXT: bics r4, r1
+; V6M-NEXT: mov r0, r5
+; V6M-NEXT: mov r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %notmask = shl i64 -1, %numlowbits
+ %mask = xor i64 %notmask, -1
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c. 32-bit.
+; ---------------------------------------------------------------------------- ;
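+; Pattern c builds the mask as (-1 >> (32 - numlowbits)) and ANDs it with the
+; value; lacking a BZHI-style instruction, ARM lowers this to a left/right
+; shift pair by the same amount.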
+
+define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %mask = lshr i32 -1, %sh_prom
+ %masked = and i32 %mask, %val
+ ret i32 %masked
+}
+
+define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_c4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_c4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_c4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_c4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i32 32, %numlowbits
+ %mask = lshr i32 -1, %numhighbits
+ %masked = and i32 %val, %mask ; swapped order
+ ret i32 %masked
+}
+
+; 64-bit
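+; The i64 variants split the value across two registers; Thumb1 (V6M) has no
+; 64-bit variable shift and calls the __aeabi_llsr runtime helper instead.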
+
+define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsbs.w lr, r2, #32
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: mov.w r12, #-1
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsr.w r2, r12, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_c0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsbs lr, r2, #32
+; V7A-NEXT: rsb r2, r2, #64
+; V7A-NEXT: mvn r12, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r2, r12, r2
+; V7A-NEXT: lsrpl r3, r3, lr
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: and r1, r2, r1
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsbs.w lr, r2, #32
+; V7A-T-NEXT: rsb.w r2, r2, #64
+; V7A-T-NEXT: mov.w r12, #-1
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsr.w r2, r12, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r3, r3, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: ands r0, r3
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r2, r0, r2
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: subs.w r12, r2, #32
+; V7M-NEXT: lsr.w r2, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_c1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb lr, r2, #64
+; V7A-NEXT: mvn r2, #31
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: uxtb r12, lr
+; V7A-NEXT: uxtab r2, r2, lr
+; V7A-NEXT: lsr r12, r3, r12
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: movwpl r12, #0
+; V7A-NEXT: lsrpl r3, r3, r2
+; V7A-NEXT: and r1, r12, r1
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w lr, r2, #64
+; V7A-T-NEXT: mvn r2, #31
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: uxtb.w r12, lr
+; V7A-T-NEXT: uxtab r2, r2, lr
+; V7A-T-NEXT: lsr.w r12, r3, r12
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r2
+; V7A-T-NEXT: and.w r1, r1, r12
+; V7A-T-NEXT: ands r0, r3
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r0, r0, r2
+; V6M-NEXT: uxtb r2, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsbs.w r1, r2, #32
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl r3, r1
+; V7M-NEXT: ldrd r0, r1, [r0]
+; V7M-NEXT: mov.w r12, #-1
+; V7M-NEXT: lsr.w r2, r12, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_c2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r5, lr}
+; V7A-NEXT: push {r5, lr}
+; V7A-NEXT: rsbs r1, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: mvn r12, #0
+; V7A-NEXT: ldm r0, {r0, r5}
+; V7A-NEXT: lsrpl r3, r3, r1
+; V7A-NEXT: rsb r1, r2, #64
+; V7A-NEXT: and r0, r3, r0
+; V7A-NEXT: lsr r1, r12, r1
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: and r1, r1, r5
+; V7A-NEXT: pop {r5, pc}
+;
+; V7A-T-LABEL: bzhi64_c2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsbs.w r1, r2, #32
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: ldrd r0, lr, [r0]
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r3, r1
+; V7A-T-NEXT: rsb.w r1, r2, #64
+; V7A-T-NEXT: mov.w r12, #-1
+; V7A-T-NEXT: and.w r0, r0, r3
+; V7A-T-NEXT: lsr.w r1, r12, r1
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: and.w r1, r1, lr
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r2, r0, r2
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldm r4!, {r2, r3}
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: ands r1, r3
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #64
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: subs.w r2, r1, #32
+; V7M-NEXT: lsr.w r1, r3, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl r3, r2
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_c3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r4, r6, r11, lr}
+; V7A-NEXT: push {r4, r6, r11, lr}
+; V7A-NEXT: rsb r1, r1, #64
+; V7A-NEXT: mvn r4, #31
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: ldr r6, [r0]
+; V7A-NEXT: ldr r3, [r0, #4]
+; V7A-NEXT: uxtb r0, r1
+; V7A-NEXT: uxtab r4, r4, r1
+; V7A-NEXT: lsr r0, r2, r0
+; V7A-NEXT: cmp r4, #0
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: and r1, r0, r3
+; V7A-NEXT: lsrpl r2, r2, r4
+; V7A-NEXT: and r0, r2, r6
+; V7A-NEXT: pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r1, r1, #64
+; V7A-T-NEXT: mvn r3, #31
+; V7A-T-NEXT: ldrd r12, lr, [r0]
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: uxtb r0, r1
+; V7A-T-NEXT: uxtab r3, r3, r1
+; V7A-T-NEXT: lsr.w r0, r2, r0
+; V7A-T-NEXT: cmp r3, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: and.w r1, r0, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl r2, r3
+; V7A-T-NEXT: and.w r0, r2, r12
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: mov r4, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r0, r0, r1
+; V6M-NEXT: uxtb r2, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ldm r4!, {r2, r3}
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: ands r1, r3
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %mask = lshr i64 -1, %sh_prom
+ %masked = and i64 %mask, %val
+ ret i64 %masked
+}
+
+define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_c4_commutative:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsbs.w lr, r2, #32
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: mov.w r12, #-1
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsr.w r2, r12, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r3, r3, lr
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r2, #0
+; V7M-NEXT: ands r0, r3
+; V7M-NEXT: ands r1, r2
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_c4_commutative:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsbs lr, r2, #32
+; V7A-NEXT: rsb r2, r2, #64
+; V7A-NEXT: mvn r12, #0
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsr r2, r12, r2
+; V7A-NEXT: lsrpl r3, r3, lr
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r0, r0, r3
+; V7A-NEXT: and r1, r1, r2
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c4_commutative:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsbs.w lr, r2, #32
+; V7A-T-NEXT: rsb.w r2, r2, #64
+; V7A-T-NEXT: mov.w r12, #-1
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsr.w r2, r12, r2
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r3, r3, lr
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r2, #0
+; V7A-T-NEXT: ands r0, r3
+; V7A-T-NEXT: ands r1, r2
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c4_commutative:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #64
+; V6M-NEXT: subs r2, r0, r2
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %numhighbits = sub i64 64, %numlowbits
+ %mask = lshr i64 -1, %numhighbits
+ %masked = and i64 %val, %mask ; swapped order
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern d. 32-bit.
+; ---------------------------------------------------------------------------- ;
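+; Pattern d clears the high bits directly, with no materialized mask:
+; (val << (32 - numlowbits)) >> (32 - numlowbits).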
+
+define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %val, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_d1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_d1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_d1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_d1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %highbitscleared = shl i32 %val, %sh_prom
+ %masked = lshr i32 %highbitscleared, %sh_prom
+ ret i32 %masked
+}
+
+define i32 @bzhi32_d2_load(ptr %w, i32 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_d2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_d2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_d2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_d2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %numhighbits = sub i32 32, %numlowbits
+ %highbitscleared = shl i32 %val, %numhighbits
+ %masked = lshr i32 %highbitscleared, %numhighbits
+ ret i32 %masked
+}
+
+define i32 @bzhi32_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi32_d3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #32
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: lsls r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_d3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: rsb r1, r1, #32
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: uxtb r1, r1
+; V7A-NEXT: lsl r0, r0, r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_d3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: rsb.w r1, r1, #32
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: uxtb r1, r1
+; V7A-T-NEXT: lsls r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_d3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #32
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r1, r1
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: lsls r0, r1
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: bx lr
+ %val = load i32, ptr %w
+ %numhighbits = sub i8 32, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i32
+ %highbitscleared = shl i32 %val, %sh_prom
+ %masked = lshr i32 %highbitscleared, %sh_prom
+ ret i32 %masked
+}
+
+; 64-bit.
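+; For i64, the value is shifted left then right by the same amount; V6M again
+; falls back to the __aeabi_llsl/__aeabi_llsr helpers.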
+
+define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_d0:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r3, r2, #64
+; V7M-NEXT: rsbs.w r2, r2, #32
+; V7M-NEXT: rsb.w lr, r3, #32
+; V7M-NEXT: lsl.w r12, r1, r3
+; V7M-NEXT: lsr.w r1, r0, lr
+; V7M-NEXT: orr.w r1, r1, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, r2
+; V7M-NEXT: lsl.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r12, r1, lr
+; V7M-NEXT: lsr.w r0, r0, r3
+; V7M-NEXT: orr.w r0, r0, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r2
+; V7M-NEXT: lsr.w r1, r1, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_d0:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb lr, r2, #64
+; V7A-NEXT: rsbs r2, r2, #32
+; V7A-NEXT: rsb r12, lr, #32
+; V7A-NEXT: lsr r3, r0, r12
+; V7A-NEXT: orr r1, r3, r1, lsl lr
+; V7A-NEXT: lslpl r1, r0, r2
+; V7A-NEXT: lsl r0, r0, lr
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, lr
+; V7A-NEXT: orr r0, r0, r1, lsl r12
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: lsr r1, r1, lr
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d0:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #64
+; V7A-T-NEXT: rsbs.w r2, r2, #32
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsl.w r12, r1, r3
+; V7A-T-NEXT: lsr.w r1, r0, lr
+; V7A-T-NEXT: orr.w r1, r1, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r12, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: lsr.w r1, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_d0:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r4, r3, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %val, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ ret i64 %masked
+}
+
+define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_d1_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r2, r2, #64
+; V7M-NEXT: uxtb r2, r2
+; V7M-NEXT: rsb.w r3, r2, #32
+; V7M-NEXT: lsl.w r12, r1, r2
+; V7M-NEXT: lsr.w r1, r0, r3
+; V7M-NEXT: orr.w r1, r1, r12
+; V7M-NEXT: subs.w r12, r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r1, r0, r12
+; V7M-NEXT: lsl.w r0, r0, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r3, r1, r3
+; V7M-NEXT: lsr.w r0, r0, r2
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r1, r12
+; V7M-NEXT: lsr.w r1, r1, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_d1_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r11, lr}
+; V7A-NEXT: push {r11, lr}
+; V7A-NEXT: rsb lr, r2, #64
+; V7A-NEXT: uxtb r3, lr
+; V7A-NEXT: rsb r12, r3, #32
+; V7A-NEXT: lsr r2, r0, r12
+; V7A-NEXT: orr r1, r2, r1, lsl r3
+; V7A-NEXT: mvn r2, #31
+; V7A-NEXT: uxtab r2, r2, lr
+; V7A-NEXT: cmp r2, #0
+; V7A-NEXT: lslpl r1, r0, r2
+; V7A-NEXT: lsl r0, r0, r3
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r3
+; V7A-NEXT: orr r0, r0, r1, lsl r12
+; V7A-NEXT: lsrpl r0, r1, r2
+; V7A-NEXT: lsr r1, r1, r3
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d1_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r4, r2, #64
+; V7A-T-NEXT: mvn r2, #31
+; V7A-T-NEXT: uxtb r3, r4
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsl.w r12, r1, r3
+; V7A-T-NEXT: uxtab r2, r2, r4
+; V7A-T-NEXT: lsr.w r1, r0, lr
+; V7A-T-NEXT: cmp r2, #0
+; V7A-T-NEXT: orr.w r1, r1, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: lsr.w r1, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bzhi64_d1_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: movs r3, #64
+; V6M-NEXT: subs r2, r3, r2
+; V6M-NEXT: uxtb r4, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %highbitscleared = shl i64 %val, %sh_prom
+ %masked = lshr i64 %highbitscleared, %sh_prom
+ ret i64 %masked
+}
+
+define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_d2_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: .save {r7, lr}
+; V7M-NEXT: push {r7, lr}
+; V7M-NEXT: rsb.w r1, r2, #64
+; V7M-NEXT: ldrd r0, r3, [r0]
+; V7M-NEXT: rsb.w lr, r1, #32
+; V7M-NEXT: rsbs.w r2, r2, #32
+; V7M-NEXT: lsl.w r12, r3, r1
+; V7M-NEXT: lsr.w r3, r0, lr
+; V7M-NEXT: orr.w r3, r3, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r3, r0, r2
+; V7M-NEXT: lsl.w r0, r0, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r12, r3, lr
+; V7M-NEXT: lsr.w r0, r0, r1
+; V7M-NEXT: lsr.w r1, r3, r1
+; V7M-NEXT: orr.w r0, r0, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r3, r2
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_d2_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r5, r7, r11, lr}
+; V7A-NEXT: push {r5, r7, r11, lr}
+; V7A-NEXT: rsb r3, r2, #64
+; V7A-NEXT: ldm r0, {r0, r7}
+; V7A-NEXT: rsb r1, r3, #32
+; V7A-NEXT: rsbs r2, r2, #32
+; V7A-NEXT: lsr r5, r0, r1
+; V7A-NEXT: orr r7, r5, r7, lsl r3
+; V7A-NEXT: lslpl r7, r0, r2
+; V7A-NEXT: lsl r0, r0, r3
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r3
+; V7A-NEXT: orr r0, r0, r7, lsl r1
+; V7A-NEXT: lsr r1, r7, r3
+; V7A-NEXT: lsrpl r0, r7, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r5, r7, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d2_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r7, lr}
+; V7A-T-NEXT: push {r7, lr}
+; V7A-T-NEXT: rsb.w r3, r2, #64
+; V7A-T-NEXT: ldrd r0, r1, [r0]
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: rsbs.w r2, r2, #32
+; V7A-T-NEXT: lsl.w r12, r1, r3
+; V7A-T-NEXT: lsr.w r1, r0, lr
+; V7A-T-NEXT: orr.w r1, r1, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r1, r0, r2
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r12, r1, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r1, r2
+; V7A-T-NEXT: lsr.w r1, r1, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_d2_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: movs r1, #64
+; V6M-NEXT: subs r4, r1, r2
+; V6M-NEXT: ldr r2, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %numhighbits = sub i64 64, %numlowbits
+ %highbitscleared = shl i64 %val, %numhighbits
+ %masked = lshr i64 %highbitscleared, %numhighbits
+ ret i64 %masked
+}
+
+define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; V7M-LABEL: bzhi64_d3_load_indexzext:
+; V7M: @ %bb.0:
+; V7M-NEXT: rsb.w r1, r1, #64
+; V7M-NEXT: ldrd r0, r2, [r0]
+; V7M-NEXT: uxtb r1, r1
+; V7M-NEXT: rsb.w r3, r1, #32
+; V7M-NEXT: lsl.w r12, r2, r1
+; V7M-NEXT: lsr.w r2, r0, r3
+; V7M-NEXT: orr.w r2, r2, r12
+; V7M-NEXT: subs.w r12, r1, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl.w r2, r0, r12
+; V7M-NEXT: lsl.w r0, r0, r1
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r0, #0
+; V7M-NEXT: lsl.w r3, r2, r3
+; V7M-NEXT: lsr.w r0, r0, r1
+; V7M-NEXT: lsr.w r1, r2, r1
+; V7M-NEXT: orr.w r0, r0, r3
+; V7M-NEXT: it pl
+; V7M-NEXT: lsrpl.w r0, r2, r12
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_d3_load_indexzext:
+; V7A: @ %bb.0:
+; V7A-NEXT: .save {r5, r7, r11, lr}
+; V7A-NEXT: push {r5, r7, r11, lr}
+; V7A-NEXT: rsb r1, r1, #64
+; V7A-NEXT: ldm r0, {r0, r7}
+; V7A-NEXT: uxtb r2, r1
+; V7A-NEXT: rsb r3, r2, #32
+; V7A-NEXT: lsr r5, r0, r3
+; V7A-NEXT: orr r7, r5, r7, lsl r2
+; V7A-NEXT: mvn r5, #31
+; V7A-NEXT: uxtab r1, r5, r1
+; V7A-NEXT: cmp r1, #0
+; V7A-NEXT: lslpl r7, r0, r1
+; V7A-NEXT: lsl r0, r0, r2
+; V7A-NEXT: movwpl r0, #0
+; V7A-NEXT: lsr r0, r0, r2
+; V7A-NEXT: orr r0, r0, r7, lsl r3
+; V7A-NEXT: lsrpl r0, r7, r1
+; V7A-NEXT: lsr r1, r7, r2
+; V7A-NEXT: movwpl r1, #0
+; V7A-NEXT: pop {r5, r7, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d3_load_indexzext:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: .save {r4, lr}
+; V7A-T-NEXT: push {r4, lr}
+; V7A-T-NEXT: rsb.w r4, r1, #64
+; V7A-T-NEXT: ldrd r0, r2, [r0]
+; V7A-T-NEXT: mvn r1, #31
+; V7A-T-NEXT: uxtb r3, r4
+; V7A-T-NEXT: rsb.w lr, r3, #32
+; V7A-T-NEXT: lsl.w r12, r2, r3
+; V7A-T-NEXT: uxtab r1, r1, r4
+; V7A-T-NEXT: lsr.w r2, r0, lr
+; V7A-T-NEXT: cmp r1, #0
+; V7A-T-NEXT: orr.w r2, r2, r12
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl.w r2, r0, r1
+; V7A-T-NEXT: lsl.w r0, r0, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r0, #0
+; V7A-T-NEXT: lsl.w r4, r2, lr
+; V7A-T-NEXT: lsr.w r0, r0, r3
+; V7A-T-NEXT: orr.w r0, r0, r4
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lsrpl.w r0, r2, r1
+; V7A-T-NEXT: lsr.w r1, r2, r3
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl r1, #0
+; V7A-T-NEXT: pop {r4, pc}
+;
+; V6M-LABEL: bzhi64_d3_load_indexzext:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, lr}
+; V6M-NEXT: push {r4, lr}
+; V6M-NEXT: movs r2, #64
+; V6M-NEXT: subs r1, r2, r1
+; V6M-NEXT: uxtb r4, r1
+; V6M-NEXT: ldr r2, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: mov r2, r4
+; V6M-NEXT: bl __aeabi_llsr
+; V6M-NEXT: pop {r4, pc}
+ %val = load i64, ptr %w
+ %numhighbits = sub i8 64, %numlowbits
+ %sh_prom = zext i8 %numhighbits to i64
+ %highbitscleared = shl i64 %val, %sh_prom
+ %masked = lshr i64 %highbitscleared, %sh_prom
+ ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant mask
+; ---------------------------------------------------------------------------- ;
+
+; 32-bit
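+; Constant masks pick the cheapest encoding per target: BIC with the inverted
+; immediate, BFC for a contiguous clear of the high bits, AND for small
+; immediates, or a literal-pool load on V6M.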
+
+define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask32:
+; V7M: @ %bb.0:
+; V7M-NEXT: bic r0, r0, #-2147483648
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask32:
+; V7A: @ %bb.0:
+; V7A-NEXT: bic r0, r0, #-2147483648
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bic r0, r0, #-2147483648
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask32:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r1, #31
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %masked = and i32 %val, 2147483647
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask32_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask32_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: bic r0, r0, #-2147483648
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask32_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: bic r0, r0, #-2147483648
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask32_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: bic r0, r0, #-2147483648
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask32_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r1, #31
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: bx lr
+ %val1 = load i32, ptr %val
+ %masked = and i32 %val1, 2147483647
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask16:
+; V7M: @ %bb.0:
+; V7M-NEXT: bfc r0, #15, #17
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask16:
+; V7A: @ %bb.0:
+; V7A-NEXT: bfc r0, #15, #17
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask16:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bfc r0, #15, #17
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask16:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, .LCPI41_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI41_0:
+; V6M-NEXT: .long 32767 @ 0x7fff
+ %masked = and i32 %val, 32767
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask16_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: bfc r0, #15, #17
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask16_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: bfc r0, #15, #17
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask16_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: bfc r0, #15, #17
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask16_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r0]
+; V6M-NEXT: ldr r0, .LCPI42_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI42_0:
+; V6M-NEXT: .long 32767 @ 0x7fff
+ %val1 = load i32, ptr %val
+ %masked = and i32 %val1, 32767
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask8:
+; V7M: @ %bb.0:
+; V7M-NEXT: and r0, r0, #127
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask8:
+; V7A: @ %bb.0:
+; V7A-NEXT: and r0, r0, #127
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask8:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: and r0, r0, #127
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask8:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #127
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %masked = and i32 %val, 127
+ ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi32_constant_mask8_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: and r0, r0, #127
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask8_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: and r0, r0, #127
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask8_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: and r0, r0, #127
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask8_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r0]
+; V6M-NEXT: movs r0, #127
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: bx lr
+ %val1 = load i32, ptr %val
+ %masked = and i32 %val1, 127
+ ret i32 %masked
+}
+
+; 64-bit
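+; The i64 constants use the same encodings on each half; masks that fit in the
+; low 32 bits simply zero the high register.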
+
+define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask64:
+; V7M: @ %bb.0:
+; V7M-NEXT: bic r1, r1, #-1073741824
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask64:
+; V7A: @ %bb.0:
+; V7A-NEXT: bic r1, r1, #-1073741824
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bic r1, r1, #-1073741824
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask64:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #3
+; V6M-NEXT: lsls r2, r2, #30
+; V6M-NEXT: bics r1, r2
+; V6M-NEXT: bx lr
+ %masked = and i64 %val, 4611686018427387903
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask64_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask64_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldrd r0, r1, [r0]
+; V7M-NEXT: bic r1, r1, #-1073741824
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask64_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldrd r0, r1, [r0]
+; V7A-NEXT: bic r1, r1, #-1073741824
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask64_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldrd r0, r1, [r0]
+; V7A-T-NEXT: bic r1, r1, #-1073741824
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask64_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #3
+; V6M-NEXT: lsls r3, r1, #30
+; V6M-NEXT: ldr r2, [r0]
+; V6M-NEXT: ldr r1, [r0, #4]
+; V6M-NEXT: bics r1, r3
+; V6M-NEXT: mov r0, r2
+; V6M-NEXT: bx lr
+ %val1 = load i64, ptr %val
+ %masked = and i64 %val1, 4611686018427387903
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask32:
+; V7M: @ %bb.0:
+; V7M-NEXT: bic r0, r0, #-2147483648
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask32:
+; V7A: @ %bb.0:
+; V7A-NEXT: bic r0, r0, #-2147483648
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask32:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bic r0, r0, #-2147483648
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask32:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r1, #31
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %masked = and i64 %val, 2147483647
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask32_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bic r0, r0, #-2147483648
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask32_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bic r0, r0, #-2147483648
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask32_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bic r0, r0, #-2147483648
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask32_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #1
+; V6M-NEXT: lsls r1, r1, #31
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: bics r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %val1 = load i64, ptr %val
+ %masked = and i64 %val1, 2147483647
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask16:
+; V7M: @ %bb.0:
+; V7M-NEXT: bfc r0, #15, #17
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask16:
+; V7A: @ %bb.0:
+; V7A-NEXT: bfc r0, #15, #17
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask16:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: bfc r0, #15, #17
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask16:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, .LCPI49_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI49_0:
+; V6M-NEXT: .long 32767 @ 0x7fff
+ %masked = and i64 %val, 32767
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask16_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bfc r0, #15, #17
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask16_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bfc r0, #15, #17
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask16_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bfc r0, #15, #17
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask16_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r0]
+; V6M-NEXT: ldr r0, .LCPI50_0
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+; V6M-NEXT: .p2align 2
+; V6M-NEXT: @ %bb.1:
+; V6M-NEXT: .LCPI50_0:
+; V6M-NEXT: .long 32767 @ 0x7fff
+ %val1 = load i64, ptr %val
+ %masked = and i64 %val1, 32767
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask8:
+; V7M: @ %bb.0:
+; V7M-NEXT: and r0, r0, #127
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask8:
+; V7A: @ %bb.0:
+; V7A-NEXT: and r0, r0, #127
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask8:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: and r0, r0, #127
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask8:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r1, #127
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %masked = and i64 %val, 127
+ ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind {
+; V7M-LABEL: bzhi64_constant_mask8_load:
+; V7M: @ %bb.0:
+; V7M-NEXT: ldr r0, [r0]
+; V7M-NEXT: movs r1, #0
+; V7M-NEXT: and r0, r0, #127
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask8_load:
+; V7A: @ %bb.0:
+; V7A-NEXT: ldr r0, [r0]
+; V7A-NEXT: mov r1, #0
+; V7A-NEXT: and r0, r0, #127
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask8_load:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: ldr r0, [r0]
+; V7A-T-NEXT: movs r1, #0
+; V7A-T-NEXT: and r0, r0, #127
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask8_load:
+; V6M: @ %bb.0:
+; V6M-NEXT: ldr r1, [r0]
+; V6M-NEXT: movs r0, #127
+; V6M-NEXT: ands r0, r1
+; V6M-NEXT: movs r1, #0
+; V6M-NEXT: bx lr
+ %val1 = load i64, ptr %val
+ %masked = and i64 %val1, 127
+ ret i64 %masked
+}
diff --git a/llvm/test/CodeGen/ARM/inline-asm-clobber.ll b/llvm/test/CodeGen/ARM/inline-asm-clobber.ll
index 7b1331f..f44ad2a 100644
--- a/llvm/test/CodeGen/ARM/inline-asm-clobber.ll
+++ b/llvm/test/CodeGen/ARM/inline-asm-clobber.ll
@@ -6,12 +6,19 @@
; RUN: llc <%s -mtriple=arm-none-eabi --frame-pointer=all 2>&1 \
; RUN: | FileCheck %s -check-prefix=NO_FP_ELIM
+; RUN: llc <%s -mtriple=armv6-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS2
+; RUN: llc <%s -mtriple=armv6k-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS2
+; RUN: llc <%s -mtriple=armv6k-apple-ios3 2>&1 | FileCheck %s -check-prefix=IOS3
+; RUN: llc <%s -mtriple=armv7-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS3
+
; CHECK: warning: inline asm clobber list contains reserved registers: SP, PC
; CHECK: warning: inline asm clobber list contains reserved registers: R11
; RWPI: warning: inline asm clobber list contains reserved registers: R9, SP, PC
; RWPI: warning: inline asm clobber list contains reserved registers: R11
; NO_FP_ELIM: warning: inline asm clobber list contains reserved registers: R11, SP, PC
; NO_FP_ELIM: warning: inline asm clobber list contains reserved registers: R11
+; IOS2: warning: inline asm clobber list contains reserved registers: R9, SP, PC
+; IOS3: warning: inline asm clobber list contains reserved registers: SP, PC
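+; R9 is only reserved (and thus warned about) on older iOS deployment targets
+; with pre-v7 CPUs, as the IOS2 vs IOS3 run lines above exercise.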
define void @foo() nounwind {
call void asm sideeffect "mov r7, #1",
diff --git a/llvm/test/CodeGen/ARM/llrint-conv.ll b/llvm/test/CodeGen/ARM/llrint-conv.ll
index 749ee00..a1a04db 100644
--- a/llvm/test/CodeGen/ARM/llrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/llrint-conv.ll
@@ -1,46 +1,71 @@
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
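+; The half-precision argument is promoted to float before the llrintf call:
+; via __aeabi_h2f without fullfp16, or vcvtb.f32.f16 with +fullfp16.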
-; SOFTFP-LABEL: testmsxh_builtin:
-; SOFTFP: bl llrintf
-; HARDFP-LABEL: testmsxh_builtin:
-; HARDFP: bl llrintf
define i64 @testmsxh_builtin(half %x) {
+; CHECK-SOFT-LABEL: testmsxh_builtin:
+; CHECK-SOFT: @ %bb.0: @ %entry
+; CHECK-SOFT-NEXT: .save {r11, lr}
+; CHECK-SOFT-NEXT: push {r11, lr}
+; CHECK-SOFT-NEXT: bl __aeabi_h2f
+; CHECK-SOFT-NEXT: bl llrintf
+; CHECK-SOFT-NEXT: pop {r11, pc}
+;
+; CHECK-NOFP16-LABEL: testmsxh_builtin:
+; CHECK-NOFP16: @ %bb.0: @ %entry
+; CHECK-NOFP16-NEXT: .save {r11, lr}
+; CHECK-NOFP16-NEXT: push {r11, lr}
+; CHECK-NOFP16-NEXT: vmov r0, s0
+; CHECK-NOFP16-NEXT: bl __aeabi_h2f
+; CHECK-NOFP16-NEXT: vmov s0, r0
+; CHECK-NOFP16-NEXT: bl llrintf
+; CHECK-NOFP16-NEXT: pop {r11, pc}
+;
+; CHECK-FP16-LABEL: testmsxh_builtin:
+; CHECK-FP16: @ %bb.0: @ %entry
+; CHECK-FP16-NEXT: .save {r11, lr}
+; CHECK-FP16-NEXT: push {r11, lr}
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl llrintf
+; CHECK-FP16-NEXT: pop {r11, pc}
entry:
%0 = tail call i64 @llvm.llrint.i64.f16(half %x)
ret i64 %0
}
-; SOFTFP-LABEL: testmsxs_builtin:
-; SOFTFP: bl llrintf
-; HARDFP-LABEL: testmsxs_builtin:
-; HARDFP: bl llrintf
define i64 @testmsxs_builtin(float %x) {
+; CHECK-LABEL: testmsxs_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llrintf
+; CHECK-NEXT: pop {r11, pc}
entry:
%0 = tail call i64 @llvm.llrint.i64.f32(float %x)
ret i64 %0
}
-; SOFTFP-LABEL: testmsxd_builtin:
-; SOFTFP: bl llrint
-; HARDFP-LABEL: testmsxd_builtin:
-; HARDFP: bl llrint
define i64 @testmsxd_builtin(double %x) {
+; CHECK-LABEL: testmsxd_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llrint
+; CHECK-NEXT: pop {r11, pc}
entry:
%0 = tail call i64 @llvm.llrint.i64.f64(double %x)
ret i64 %0
}
-; FIXME(#44744): incorrect libcall
-; SOFTFP-LABEL: testmsxq_builtin:
-; SOFTFP: bl llrintl
-; HARDFP-LABEL: testmsxq_builtin:
-; HARDFP: bl llrintl
define i64 @testmsxq_builtin(fp128 %x) {
+; CHECK-LABEL: testmsxq_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llrintl
+; CHECK-NEXT: pop {r11, pc}
entry:
%0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x)
ret i64 %0
}
-
-declare i64 @llvm.llrint.i64.f32(float) nounwind readnone
-declare i64 @llvm.llrint.i64.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/ARM/llvm.exp10.ll b/llvm/test/CodeGen/ARM/llvm.exp10.ll
index eb72fe8..49397ca 100644
--- a/llvm/test/CodeGen/ARM/llvm.exp10.ll
+++ b/llvm/test/CodeGen/ARM/llvm.exp10.ll
@@ -189,12 +189,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
; CHECK-NEXT: mov r6, r0
; CHECK-NEXT: mov r0, r4
; CHECK-NEXT: bl exp10f
+; CHECK-NEXT: mov r4, r0
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl exp10f
; CHECK-NEXT: vmov s16, r0
+; CHECK-NEXT: mov r1, r4
; CHECK-NEXT: vmov s18, r6
-; CHECK-NEXT: vmov r0, r1, d8
; CHECK-NEXT: vmov r2, r3, d9
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r6, pc}
@@ -207,7 +208,6 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
; CHECK: @ %bb.0:
; CHECK-NEXT: push {r4, r5, r6, r7, lr}
; CHECK-NEXT: sub sp, #4
-; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: mov r6, r0
; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: mov r4, r3
@@ -216,17 +216,15 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
; CHECK-NEXT: mov r7, r0
; CHECK-NEXT: mov r0, r4
; CHECK-NEXT: bl exp10f
-; CHECK-NEXT: vmov s19, r0
+; CHECK-NEXT: mov r4, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl exp10f
-; CHECK-NEXT: vmov s18, r0
+; CHECK-NEXT: mov r5, r0
; CHECK-NEXT: mov r0, r6
-; CHECK-NEXT: vmov s17, r7
; CHECK-NEXT: bl exp10f
-; CHECK-NEXT: vmov s16, r0
-; CHECK-NEXT: vmov r2, r3, d9
-; CHECK-NEXT: vmov r0, r1, d8
-; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: mov r1, r7
+; CHECK-NEXT: mov r2, r5
+; CHECK-NEXT: mov r3, r4
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
%r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x)
diff --git a/llvm/test/CodeGen/ARM/llvm.frexp.ll b/llvm/test/CodeGen/ARM/llvm.frexp.ll
index 376426d..80972b75 100644
--- a/llvm/test/CodeGen/ARM/llvm.frexp.ll
+++ b/llvm/test/CodeGen/ARM/llvm.frexp.ll
@@ -362,33 +362,31 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) {
define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) {
; CHECK-LABEL: test_frexp_v4f32_v4i32_only_use_fract:
; CHECK: @ %bb.0:
-; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: mov r5, r1
-; CHECK-NEXT: mov r6, r0
-; CHECK-NEXT: mov r1, sp
-; CHECK-NEXT: mov r0, r3
-; CHECK-NEXT: mov r4, r2
-; CHECK-NEXT: bl frexpf
+; CHECK-NEXT: push {r4, r5, r6, r7, lr}
+; CHECK-NEXT: sub sp, #20
+; CHECK-NEXT: mov r6, r1
; CHECK-NEXT: add r1, sp, #4
-; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r7, r0
+; CHECK-NEXT: mov r0, r3
+; CHECK-NEXT: mov r5, r2
; CHECK-NEXT: bl frexpf
; CHECK-NEXT: add r1, sp, #8
-; CHECK-NEXT: vmov s18, r0
+; CHECK-NEXT: mov r4, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl frexpf
; CHECK-NEXT: add r1, sp, #12
-; CHECK-NEXT: vmov s17, r0
+; CHECK-NEXT: mov r5, r0
; CHECK-NEXT: mov r0, r6
; CHECK-NEXT: bl frexpf
-; CHECK-NEXT: vmov s16, r0
-; CHECK-NEXT: vmov r2, r3, d9
-; CHECK-NEXT: vmov r0, r1, d8
-; CHECK-NEXT: add sp, #16
-; CHECK-NEXT: vpop {d8, d9}
-; CHECK-NEXT: pop {r4, r5, r6, pc}
+; CHECK-NEXT: add r1, sp, #16
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: mov r0, r7
+; CHECK-NEXT: bl frexpf
+; CHECK-NEXT: mov r1, r6
+; CHECK-NEXT: mov r2, r5
+; CHECK-NEXT: mov r3, r4
+; CHECK-NEXT: add sp, #20
+; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
%result = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> %a)
%result.0 = extractvalue { <4 x float>, <4 x i32> } %result, 0
ret <4 x float> %result.0
diff --git a/llvm/test/CodeGen/ARM/lrint-conv.ll b/llvm/test/CodeGen/ARM/lrint-conv.ll
index 9aa9511..23a2685 100644
--- a/llvm/test/CodeGen/ARM/lrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/lrint-conv.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
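+; With matching calling conventions the i32 lrint calls become direct tail
+; calls (plain "b lrintf"/"b lrint"); the fp128 case still uses a full call.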
; FIXME: crash
; define i32 @testmswh_builtin(half %x) {
@@ -8,36 +10,37 @@
; ret i32 %0
; }
-; SOFTFP-LABEL: testmsws_builtin:
-; SOFTFP: bl lrintf
-; HARDFP-LABEL: testmsws_builtin:
-; HARDFP: bl lrintf
define i32 @testmsws_builtin(float %x) {
+; CHECK-LABEL: testmsws_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: b lrintf
entry:
%0 = tail call i32 @llvm.lrint.i32.f32(float %x)
ret i32 %0
}
-; SOFTFP-LABEL: testmswd_builtin:
-; SOFTFP: bl lrint
-; HARDFP-LABEL: testmswd_builtin:
-; HARDFP: bl lrint
define i32 @testmswd_builtin(double %x) {
+; CHECK-LABEL: testmswd_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: b lrint
entry:
%0 = tail call i32 @llvm.lrint.i32.f64(double %x)
ret i32 %0
}
-; FIXME(#44744): incorrect libcall
-; SOFTFP-LABEL: testmswq_builtin:
-; SOFTFP: bl lrintl
-; HARDFP-LABEL: testmswq_builtin:
-; HARDFP: bl lrintl
define i32 @testmswq_builtin(fp128 %x) {
+; CHECK-LABEL: testmswq_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl lrintl
+; CHECK-NEXT: pop {r11, pc}
entry:
%0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
ret i32 %0
}
-declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
-declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-FP16: {{.*}}
+; CHECK-NOFP16: {{.*}}
+; CHECK-SOFT: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/vector-lrint.ll b/llvm/test/CodeGen/ARM/vector-lrint.ll
index fe5e3cb..c1159da 100644
--- a/llvm/test/CodeGen/ARM/vector-lrint.ll
+++ b/llvm/test/CodeGen/ARM/vector-lrint.ll
@@ -14,31 +14,26 @@
; %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x)
; ret <1 x iXLen> %a
; }
-; declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half>)
; define <2 x iXLen> @lrint_v2f16(<2 x half> %x) {
; %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half> %x)
; ret <2 x iXLen> %a
; }
-; declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half>)
; define <4 x iXLen> @lrint_v4f16(<4 x half> %x) {
; %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half> %x)
; ret <4 x iXLen> %a
; }
-; declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half>)
; define <8 x iXLen> @lrint_v8f16(<8 x half> %x) {
; %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x)
; ret <8 x iXLen> %a
; }
-; declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half>)
; define <16 x iXLen> @lrint_v16f16(<16 x half> %x) {
; %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x)
; ret <16 x iXLen> %a
; }
-; declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>)
define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
; LE-I32-LABEL: lrint_v1f32:
@@ -76,7 +71,6 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
%a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x)
ret <1 x iXLen> %a
}
-declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>)
define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
; LE-I32-LABEL: lrint_v2f32:
@@ -160,7 +154,6 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
%a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x)
ret <2 x iXLen> %a
}
-declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>)
define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
; LE-I32-LABEL: lrint_v4f32:
@@ -274,7 +267,6 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x)
ret <4 x iXLen> %a
}
-declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>)
define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
; LE-I32-LABEL: lrint_v8f32:
@@ -488,7 +480,6 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
%a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x)
ret <8 x iXLen> %a
}
-declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>)
define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
; LE-I32-LABEL: lrint_v16f32:
@@ -1005,7 +996,6 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
%a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x)
ret <16 x iXLen> %a
}
-declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>)
define <1 x iXLen> @lrint_v1f64(<1 x double> %x) {
; LE-I32-LABEL: lrint_v1f64:
@@ -1043,7 +1033,6 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) {
%a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x)
ret <1 x iXLen> %a
}
-declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>)
define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
; LE-I32-LABEL: lrint_v2f64:
@@ -1120,7 +1109,6 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
%a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double> %x)
ret <2 x iXLen> %a
}
-declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>)
define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; LE-I32-LABEL: lrint_v4f64:
@@ -1237,7 +1225,6 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x)
ret <4 x iXLen> %a
}
-declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>)
define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
; LE-I32-LABEL: lrint_v8f64:
@@ -1467,7 +1454,6 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
%a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x)
ret <8 x iXLen> %a
}
-declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double>)
define <16 x iXLen> @lrint_v16f64(<16 x double> %x) {
; LE-I32-LABEL: lrint_v16f64:
@@ -2053,7 +2039,6 @@ define <16 x iXLen> @lrint_v16f64(<16 x double> %x) {
%a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double> %x)
ret <16 x iXLen> %a
}
-declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double>)
define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) {
; LE-I32-LABEL: lrint_v1fp128:
@@ -2091,7 +2076,6 @@ define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) {
%a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128> %x)
ret <1 x iXLen> %a
}
-declare <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128>)
define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) {
; LE-I32-LABEL: lrint_v2fp128:
@@ -2194,7 +2178,6 @@ define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) {
%a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128> %x)
ret <2 x iXLen> %a
}
-declare <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128>)
define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) {
; LE-I32-LABEL: lrint_v4fp128:
@@ -2347,7 +2330,6 @@ define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) {
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128> %x)
ret <4 x iXLen> %a
}
-declare <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128>)
define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) {
; LE-I32-LABEL: lrint_v8fp128:
@@ -2664,7 +2646,6 @@ define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) {
%a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128> %x)
ret <8 x iXLen> %a
}
-declare <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128>)
define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) {
; LE-I32-LABEL: lrint_v16fp128:
@@ -3262,4 +3243,3 @@ define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) {
%a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128> %x)
ret <16 x iXLen> %a
}
-declare <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128>)
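The dropped declare lines here (and in the scalar ARM lrint test above) follow from textual IR no longer requiring explicit declarations for intrinsics, so the test autogenerator now omits them. A minimal sketch, assuming an LLVM recent enough to materialize intrinsic declarations from the call site:

define i32 @lrint_f32(float %x) {
  ; No "declare i32 @llvm.lrint.i32.f32(float)" is needed anywhere in
  ; the module; the parser synthesizes the declaration from this call.
  %r = call i32 @llvm.lrint.i32.f32(float %x)
  ret i32 %r
}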
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll
index 288dea0..b043ea1 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 666, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 666, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll
index e9abcf9..8219ffd 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 666, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 666, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll
index 238f488..31d8dd1 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 666, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 666, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll
index 8dc69eb..2bb4af5 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 666, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 666, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll
index b2c8faf..62fda73 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 666, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 666, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll
index 758d262..7e8de14 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 45, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 45, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll
new file mode 100644
index 0000000..8f7ef88
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll
@@ -0,0 +1,19 @@
+; RUN: not opt -passes='print<dxil-root-signature>' %s -S -o - 2>&1 | FileCheck %s
+
+
+target triple = "dxil-unknown-shadermodel6.0-compute"
+
+; CHECK: error: Invalid value for Static Sampler Flag: 4
+; CHECK-NOT: Root Signature Definitions
+
+define void @main() #0 {
+entry:
+ ret void
+}
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
+!dx.rootsignatures = !{!2} ; list of function/root signature pairs
+!2 = !{ ptr @main, !3, i32 3 } ; function, root signature
+!3 = !{ !5 } ; list of root signature elements
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 4 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll
index 47d4b52..312e769 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 666, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 666, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
index 855e0c0..80fd208 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 0x7FF8000000000000, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 0x7FF8000000000000, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
index 812749b..5daaf69 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 0x7FF8000000000000, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 0x7FF8000000000000, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
index 6898aec..423987b 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 6.660000e+02, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 6.660000e+02, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll
index dc6ee42..af630dc 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 4294967280, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 4294967280, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll
index 6cee1dd9..bd752f0 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 4294967295, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 4294967295, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll
index fa5bf12..ca0c02d 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 666 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 666, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll
index 1dd470d..77c5c7a 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll
@@ -15,7 +15,7 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
; DXC: - Name: RTS0
; DXC-NEXT: Size: 76
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll
new file mode 100644
index 0000000..7e56f04
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll
@@ -0,0 +1,42 @@
+; RUN: opt %s -dxil-embed -dxil-globals -S -o - | FileCheck %s
+; RUN: llc %s --filetype=obj -o - | obj2yaml | FileCheck %s --check-prefix=DXC
+
+target triple = "dxil-unknown-shadermodel6.0-compute"
+
+; CHECK: @dx.rts0 = private constant [248 x i8] c"{{.*}}", section "RTS0", align 4
+
+define void @main() #0 {
+entry:
+ ret void
+}
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
+!dx.rootsignatures = !{!2} ; list of function/root signature pairs
+!2 = !{ ptr @main, !3, i32 3 } ; function, root signature
+!3 = !{ !5, !6, !7, !8 } ; list of root signature elements
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 1 }
+!6 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 43, i32 0, i32 0, i32 2 }
+!7 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 44, i32 0, i32 0, i32 0 }
+!8 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 45, i32 0, i32 0, i32 3 }
+
+; DXC: - Name: RTS0
+; DXC-NEXT: Size: 248
+; DXC-NEXT: RootSignature:
+; DXC-NEXT: Version: 3
+; DXC-NEXT: NumRootParameters: 0
+; DXC-NEXT: RootParametersOffset: 24
+; DXC-NEXT: NumStaticSamplers: 4
+; DXC-NEXT: StaticSamplersOffset: 24
+; DXC-NEXT: Parameters: []
+; DXC-NEXT: Samplers:
+; DXC-LABEL: ShaderRegister: 42
+; DXC: SAMPLER_FLAG_UINT_BORDER_COLOR: true
+; DXC-LABEL: ShaderRegister: 43
+; DXC: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: true
+; DXC-LABEL: ShaderRegister: 44
+; DXC-NOT: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES:
+; DXC-NOT: SAMPLER_FLAG_UINT_BORDER_COLOR:
+; DXC-LABEL: ShaderRegister: 45
+; DXC: SAMPLER_FLAG_UINT_BORDER_COLOR: true
+; DXC-NEXT: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: true
diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll
index c244095..b68606d 100644
--- a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll
+++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll
@@ -10,6 +10,6 @@ entry:
!0 = !{ptr @CSMain, !1, i32 2}
!1 = !{!2, !3}
-!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0 }
+!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0, i32 0 }
!3 = !{!"DescriptorTable", i32 0, !4}
!4 = !{!"Sampler", i32 1, i32 42, i32 0, i32 -1, i32 0}
diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll
index 9ac02ebb..7c836e2 100644
--- a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll
+++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll
@@ -10,5 +10,5 @@ entry:
!0 = !{ptr @CSMain, !1, i32 2}
!1 = !{!2, !3}
-!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0 }
-!3 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0, i32 0 }
+!3 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll b/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll
new file mode 100644
index 0000000..267e365
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv68 -mattr=+hvxv68,+hvx-length128B < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Check that llc does not assert when the unaligned vector store V6_vS32Ub_npred_ai is generated.
+; CHECK: if (!p{{[0-3]}}) vmemu
+
+target triple = "hexagon-unknown-unknown-elf"
+
+define fastcc void @test(i1 %cmp.i.i) {
+entry:
+ %call.i.i.i172 = load ptr, ptr null, align 4
+ %add.ptr = getelementptr i8, ptr %call.i.i.i172, i32 1
+ store <32 x i32> zeroinitializer, ptr %add.ptr, align 128
+ %add.ptr4.i4 = getelementptr i8, ptr %call.i.i.i172, i32 129
+ br i1 %cmp.i.i, label %common.ret, label %if.end.i.i
+
+common.ret: ; preds = %if.end.i.i, %entry
+ ret void
+
+if.end.i.i: ; preds = %entry
+ store <32 x i32> zeroinitializer, ptr %add.ptr4.i4, align 1
+ br label %common.ret
+}
diff --git a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
index 7c9f375..40d36fb 100644
--- a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
+++ b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
@@ -97,7 +97,6 @@ entry:
; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
; ALL: ld.w $w12, 0($[[R0]])
; ALL: move.v $w[[W0:13]], $w12
-; NOODDSPREG: move.v $w[[W0:12]], $w13
; ALL: teqi $zero, 1
; ALL-NOT: st.w
; ALL-NOT: ld.w
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
index 18fb879..21ca041 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
@@ -115,5 +115,150 @@ define ptx_kernel void @inlineasm(ptr %p) {
store <2 x float> %mul, ptr %p, align 8
ret void
}
+
+define ptx_kernel void @trunc_v2i32(<2 x i32> %0) {
+; CHECK-SM90A-LABEL: trunc_v2i32(
+; CHECK-SM90A: {
+; CHECK-SM90A-NEXT: .reg .b32 %r<7>;
+; CHECK-SM90A-NEXT: .reg .b64 %rd<2>;
+; CHECK-SM90A-EMPTY:
+; CHECK-SM90A-NEXT: // %bb.0:
+; CHECK-SM90A-NEXT: ld.param.v2.b32 {%r1, %r2}, [trunc_v2i32_param_0];
+; CHECK-SM90A-NEXT: prmt.b32 %r3, %r1, %r2, 0x3340U;
+; CHECK-SM90A-NEXT: mov.b32 %r4, 0;
+; CHECK-SM90A-NEXT: prmt.b32 %r5, %r4, 0, 0x3340U;
+; CHECK-SM90A-NEXT: prmt.b32 %r6, %r5, %r3, 0x5410U;
+; CHECK-SM90A-NEXT: mov.b64 %rd1, 0;
+; CHECK-SM90A-NEXT: st.b32 [%rd1], %r6;
+; CHECK-SM90A-NEXT: ret;
+;
+; CHECK-SM100-LABEL: trunc_v2i32(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<7>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<3>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b64 %rd1, [trunc_v2i32_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM100-NEXT: mov.b32 %r3, 0;
+; CHECK-SM100-NEXT: prmt.b32 %r4, %r3, 0, 0x3340U;
+; CHECK-SM100-NEXT: prmt.b32 %r5, %r1, %r2, 0x3340U;
+; CHECK-SM100-NEXT: prmt.b32 %r6, %r4, %r5, 0x5410U;
+; CHECK-SM100-NEXT: mov.b64 %rd2, 0;
+; CHECK-SM100-NEXT: st.b32 [%rd2], %r6;
+; CHECK-SM100-NEXT: ret;
+ %2 = trunc <2 x i32> %0 to <2 x i8>
+ %3 = shufflevector <2 x i8> zeroinitializer, <2 x i8> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i8> %3, ptr null, align 4
+ ret void
+}
+
+define ptx_kernel void @zextend_to_v2i32(<2 x i8> %0) {
+; CHECK-SM90A-LABEL: zextend_to_v2i32(
+; CHECK-SM90A: {
+; CHECK-SM90A-NEXT: .reg .b16 %rs<3>;
+; CHECK-SM90A-NEXT: .reg .b32 %r<4>;
+; CHECK-SM90A-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM90A-EMPTY:
+; CHECK-SM90A-NEXT: // %bb.0:
+; CHECK-SM90A-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [zextend_to_v2i32_param_0];
+; CHECK-SM90A-NEXT: mov.b32 %r1, {%rs1, %rs2};
+; CHECK-SM90A-NEXT: cvt.u32.u16 %r2, %rs1;
+; CHECK-SM90A-NEXT: cvt.u32.u16 %r3, %rs2;
+; CHECK-SM90A-NEXT: mov.b64 %rd1, 12;
+; CHECK-SM90A-NEXT: st.b32 [%rd1], %r3;
+; CHECK-SM90A-NEXT: mov.b64 %rd2, 8;
+; CHECK-SM90A-NEXT: st.b32 [%rd2], %r2;
+; CHECK-SM90A-NEXT: mov.b64 %rd3, 4;
+; CHECK-SM90A-NEXT: st.b32 [%rd3], 0;
+; CHECK-SM90A-NEXT: mov.b64 %rd4, 0;
+; CHECK-SM90A-NEXT: st.b32 [%rd4], 0;
+; CHECK-SM90A-NEXT: ret;
+;
+; CHECK-SM100-LABEL: zextend_to_v2i32(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b16 %rs<3>;
+; CHECK-SM100-NEXT: .reg .b32 %r<5>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [zextend_to_v2i32_param_0];
+; CHECK-SM100-NEXT: mov.b32 %r1, {%rs1, %rs2};
+; CHECK-SM100-NEXT: cvt.u32.u16 %r2, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r3, %rs1;
+; CHECK-SM100-NEXT: mov.b64 %rd1, {%r3, %r2};
+; CHECK-SM100-NEXT: mov.b32 %r4, 0;
+; CHECK-SM100-NEXT: mov.b64 %rd2, {%r4, %r4};
+; CHECK-SM100-NEXT: mov.b64 %rd3, 4;
+; CHECK-SM100-NEXT: st.b32 [%rd3], %rd2;
+; CHECK-SM100-NEXT: mov.b64 %rd4, 0;
+; CHECK-SM100-NEXT: st.b32 [%rd4], %rd2;
+; CHECK-SM100-NEXT: mov.b64 %rd5, 8;
+; CHECK-SM100-NEXT: st.b32 [%rd5], %rd1;
+; CHECK-SM100-NEXT: shr.u64 %rd6, %rd1, 32;
+; CHECK-SM100-NEXT: mov.b64 %rd7, 12;
+; CHECK-SM100-NEXT: st.b32 [%rd7], %rd6;
+; CHECK-SM100-NEXT: ret;
+ %2 = zext <2 x i8> %0 to <2 x i32>
+ %3 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i32> %3, ptr null, align 4
+ ret void
+}
+
+define ptx_kernel void @sextend_to_v2i32(<2 x i8> %0) {
+; CHECK-SM90A-LABEL: sextend_to_v2i32(
+; CHECK-SM90A: {
+; CHECK-SM90A-NEXT: .reg .b16 %rs<3>;
+; CHECK-SM90A-NEXT: .reg .b32 %r<6>;
+; CHECK-SM90A-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM90A-EMPTY:
+; CHECK-SM90A-NEXT: // %bb.0:
+; CHECK-SM90A-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [sextend_to_v2i32_param_0];
+; CHECK-SM90A-NEXT: mov.b32 %r1, {%rs1, %rs2};
+; CHECK-SM90A-NEXT: cvt.u32.u16 %r2, %rs1;
+; CHECK-SM90A-NEXT: cvt.s32.s8 %r3, %r2;
+; CHECK-SM90A-NEXT: cvt.u32.u16 %r4, %rs2;
+; CHECK-SM90A-NEXT: cvt.s32.s8 %r5, %r4;
+; CHECK-SM90A-NEXT: mov.b64 %rd1, 12;
+; CHECK-SM90A-NEXT: st.b32 [%rd1], %r5;
+; CHECK-SM90A-NEXT: mov.b64 %rd2, 8;
+; CHECK-SM90A-NEXT: st.b32 [%rd2], %r3;
+; CHECK-SM90A-NEXT: mov.b64 %rd3, 4;
+; CHECK-SM90A-NEXT: st.b32 [%rd3], 0;
+; CHECK-SM90A-NEXT: mov.b64 %rd4, 0;
+; CHECK-SM90A-NEXT: st.b32 [%rd4], 0;
+; CHECK-SM90A-NEXT: ret;
+;
+; CHECK-SM100-LABEL: sextend_to_v2i32(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b16 %rs<3>;
+; CHECK-SM100-NEXT: .reg .b32 %r<7>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [sextend_to_v2i32_param_0];
+; CHECK-SM100-NEXT: mov.b32 %r1, {%rs1, %rs2};
+; CHECK-SM100-NEXT: cvt.u32.u16 %r2, %rs2;
+; CHECK-SM100-NEXT: cvt.s32.s8 %r3, %r2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r4, %rs1;
+; CHECK-SM100-NEXT: cvt.s32.s8 %r5, %r4;
+; CHECK-SM100-NEXT: mov.b64 %rd1, {%r5, %r3};
+; CHECK-SM100-NEXT: mov.b32 %r6, 0;
+; CHECK-SM100-NEXT: mov.b64 %rd2, {%r6, %r6};
+; CHECK-SM100-NEXT: mov.b64 %rd3, 4;
+; CHECK-SM100-NEXT: st.b32 [%rd3], %rd2;
+; CHECK-SM100-NEXT: mov.b64 %rd4, 0;
+; CHECK-SM100-NEXT: st.b32 [%rd4], %rd2;
+; CHECK-SM100-NEXT: mov.b64 %rd5, 8;
+; CHECK-SM100-NEXT: st.b32 [%rd5], %rd1;
+; CHECK-SM100-NEXT: shr.u64 %rd6, %rd1, 32;
+; CHECK-SM100-NEXT: mov.b64 %rd7, 12;
+; CHECK-SM100-NEXT: st.b32 [%rd7], %rd6;
+; CHECK-SM100-NEXT: ret;
+ %2 = sext <2 x i8> %0 to <2 x i32>
+ %3 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i32> %3, ptr null, align 4
+ ret void
+}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll
index a2ad294..98314a0 100644
--- a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll
@@ -897,31 +897,31 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) {
; P8LE-NEXT: mfvsrd r6, v2
; P8LE-NEXT: mfvsrd r8, v3
; P8LE-NEXT: ori r3, r3, 51289
+; P8LE-NEXT: mffprd r4, f0
; P8LE-NEXT: ori r5, r5, 42889
-; P8LE-NEXT: rldic r4, r3, 36, 1
-; P8LE-NEXT: mffprd r3, f0
+; P8LE-NEXT: rldic r3, r3, 36, 1
; P8LE-NEXT: rldic r5, r5, 35, 1
; P8LE-NEXT: rldicl r7, r6, 63, 1
-; P8LE-NEXT: oris r4, r4, 45590
+; P8LE-NEXT: oris r3, r3, 45590
; P8LE-NEXT: oris r5, r5, 1603
-; P8LE-NEXT: ori r4, r4, 17097
+; P8LE-NEXT: ori r3, r3, 17097
; P8LE-NEXT: ori r5, r5, 21445
-; P8LE-NEXT: mulhdu r4, r3, r4
+; P8LE-NEXT: mulhdu r3, r4, r3
; P8LE-NEXT: mulhdu r5, r7, r5
-; P8LE-NEXT: sub r7, r3, r4
+; P8LE-NEXT: sub r7, r4, r3
; P8LE-NEXT: rldicl r5, r5, 57, 7
; P8LE-NEXT: rldicl r7, r7, 63, 1
; P8LE-NEXT: mulli r5, r5, 654
-; P8LE-NEXT: add r4, r7, r4
+; P8LE-NEXT: add r3, r7, r3
; P8LE-NEXT: lis r7, -16037
; P8LE-NEXT: ori r7, r7, 28749
-; P8LE-NEXT: rldicl r4, r4, 60, 4
+; P8LE-NEXT: rldicl r3, r3, 60, 4
; P8LE-NEXT: sub r5, r6, r5
; P8LE-NEXT: rldic r7, r7, 32, 0
-; P8LE-NEXT: mulli r4, r4, 23
+; P8LE-NEXT: mulli r3, r3, 23
; P8LE-NEXT: oris r7, r7, 52170
; P8LE-NEXT: ori r7, r7, 12109
-; P8LE-NEXT: sub r3, r3, r4
+; P8LE-NEXT: sub r3, r4, r3
; P8LE-NEXT: mulhdu r7, r8, r7
; P8LE-NEXT: mtfprd f1, r3
; P8LE-NEXT: li r3, 0
diff --git a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
index 435b0ab..816b12e 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
@@ -35,12 +35,12 @@ define i64 @test2elt(<2 x i64> %a) local_unnamed_addr #0 {
;
; CHECK-BE-LABEL: test2elt:
; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: xscvuxdsp f0, v2
+; CHECK-BE-NEXT: xscvdpspn v3, f0
; CHECK-BE-NEXT: xxswapd vs0, v2
-; CHECK-BE-NEXT: xscvuxdsp f1, v2
; CHECK-BE-NEXT: xscvuxdsp f0, f0
-; CHECK-BE-NEXT: xscvdpspn v2, f1
-; CHECK-BE-NEXT: xscvdpspn v3, f0
-; CHECK-BE-NEXT: vmrgow v2, v2, v3
+; CHECK-BE-NEXT: xscvdpspn v2, f0
+; CHECK-BE-NEXT: vmrgow v2, v3, v2
; CHECK-BE-NEXT: mfvsrd r3, v2
; CHECK-BE-NEXT: blr
entry:
@@ -327,12 +327,12 @@ define i64 @test2elt_signed(<2 x i64> %a) local_unnamed_addr #0 {
;
; CHECK-BE-LABEL: test2elt_signed:
; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: xscvsxdsp f0, v2
+; CHECK-BE-NEXT: xscvdpspn v3, f0
; CHECK-BE-NEXT: xxswapd vs0, v2
-; CHECK-BE-NEXT: xscvsxdsp f1, v2
; CHECK-BE-NEXT: xscvsxdsp f0, f0
-; CHECK-BE-NEXT: xscvdpspn v2, f1
-; CHECK-BE-NEXT: xscvdpspn v3, f0
-; CHECK-BE-NEXT: vmrgow v2, v2, v3
+; CHECK-BE-NEXT: xscvdpspn v2, f0
+; CHECK-BE-NEXT: vmrgow v2, v3, v2
; CHECK-BE-NEXT: mfvsrd r3, v2
; CHECK-BE-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
index 9a1ed8f..1d5d918 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
@@ -37,7 +37,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
;
; RV32IA-LABEL: atomic_load_i8_unordered:
; RV32IA: # %bb.0:
-; RV32IA-NEXT: lb a0, 0(a0)
+; RV32IA-NEXT: lbu a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_unordered:
@@ -52,7 +52,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
;
; RV64IA-LABEL: atomic_load_i8_unordered:
; RV64IA: # %bb.0:
-; RV64IA-NEXT: lb a0, 0(a0)
+; RV64IA-NEXT: lbu a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i8, ptr %a unordered, align 1
ret i8 %1
@@ -71,7 +71,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
;
; RV32IA-LABEL: atomic_load_i8_monotonic:
; RV32IA: # %bb.0:
-; RV32IA-NEXT: lb a0, 0(a0)
+; RV32IA-NEXT: lbu a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_monotonic:
@@ -86,7 +86,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
;
; RV64IA-LABEL: atomic_load_i8_monotonic:
; RV64IA: # %bb.0:
-; RV64IA-NEXT: lb a0, 0(a0)
+; RV64IA-NEXT: lbu a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i8, ptr %a monotonic, align 1
ret i8 %1
@@ -105,13 +105,13 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
;
; RV32IA-WMO-LABEL: atomic_load_i8_acquire:
; RV32IA-WMO: # %bb.0:
-; RV32IA-WMO-NEXT: lb a0, 0(a0)
+; RV32IA-WMO-NEXT: lbu a0, 0(a0)
; RV32IA-WMO-NEXT: fence r, rw
; RV32IA-WMO-NEXT: ret
;
; RV32IA-TSO-LABEL: atomic_load_i8_acquire:
; RV32IA-TSO: # %bb.0:
-; RV32IA-TSO-NEXT: lb a0, 0(a0)
+; RV32IA-TSO-NEXT: lbu a0, 0(a0)
; RV32IA-TSO-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_acquire:
@@ -126,35 +126,35 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
;
; RV64IA-WMO-LABEL: atomic_load_i8_acquire:
; RV64IA-WMO: # %bb.0:
-; RV64IA-WMO-NEXT: lb a0, 0(a0)
+; RV64IA-WMO-NEXT: lbu a0, 0(a0)
; RV64IA-WMO-NEXT: fence r, rw
; RV64IA-WMO-NEXT: ret
;
; RV64IA-TSO-LABEL: atomic_load_i8_acquire:
; RV64IA-TSO: # %bb.0:
-; RV64IA-TSO-NEXT: lb a0, 0(a0)
+; RV64IA-TSO-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-NEXT: ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
-; RV32IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV32IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
-; RV32IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
-; RV64IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
-; RV64IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
%1 = load atomic i8, ptr %a acquire, align 1
ret i8 %1
@@ -174,14 +174,14 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV32IA-WMO-LABEL: atomic_load_i8_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, rw
-; RV32IA-WMO-NEXT: lb a0, 0(a0)
+; RV32IA-WMO-NEXT: lbu a0, 0(a0)
; RV32IA-WMO-NEXT: fence r, rw
; RV32IA-WMO-NEXT: ret
;
; RV32IA-TSO-LABEL: atomic_load_i8_seq_cst:
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: fence rw, rw
-; RV32IA-TSO-NEXT: lb a0, 0(a0)
+; RV32IA-TSO-NEXT: lbu a0, 0(a0)
; RV32IA-TSO-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_seq_cst:
@@ -197,40 +197,40 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV64IA-WMO-LABEL: atomic_load_i8_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, rw
-; RV64IA-WMO-NEXT: lb a0, 0(a0)
+; RV64IA-WMO-NEXT: lbu a0, 0(a0)
; RV64IA-WMO-NEXT: fence r, rw
; RV64IA-WMO-NEXT: ret
;
; RV64IA-TSO-LABEL: atomic_load_i8_seq_cst:
; RV64IA-TSO: # %bb.0:
; RV64IA-TSO-NEXT: fence rw, rw
-; RV64IA-TSO-NEXT: lb a0, 0(a0)
+; RV64IA-TSO-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-NEXT: ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
-; RV32IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV32IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
-; RV32IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
-; RV64IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
-; RV64IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
%1 = load atomic i8, ptr %a seq_cst, align 1
ret i8 %1
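Every change in this file is the same substitution: atomic i8 loads now select lbu (the zero-extending byte load) instead of lb. Since the loaded i8 is only any-extended by these functions, either encoding is correct; the switch plausibly just brings the GlobalISel atomic path in line with how ordinary i8 loads are selected. A minimal sketch of the pattern under test:

define i8 @atomic_load_byte(ptr %a) nounwind {
  %v = load atomic i8, ptr %a monotonic, align 1
  ret i8 %v        ; RV32IA/RV64IA now emit: lbu a0, 0(a0)
}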
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
index 7204064..f1d17f9f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
@@ -505,6 +505,9 @@
# DEBUG-NEXT: G_FREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
+# DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
@@ -607,11 +610,11 @@
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_FMINIMUMNUM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
-# DEBUG-NEXT: .. opcode 219 is aliased to 183
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_FMAXIMUMNUM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
-# DEBUG-NEXT: .. opcode 220 is aliased to 183
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_GET_FPENV (opcode {{[0-9]+}}): 1 type index, 0 imm indices
diff --git a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
index f8b1d50..edec1d0 100644
--- a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
@@ -11,6 +11,8 @@
; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND,SFB-NOZICOND-C %s
; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt,+zicond -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-ZICOND %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
; The conditional move optimization in sifive-p450 requires that only a
; single c.mv instruction appears in the branch shadow.
@@ -42,6 +44,14 @@ define signext i32 @test1(i32 signext %x, i32 signext %y, i32 signext %z) {
; SHORT_FORWARD-NEXT: xor a0, a0, a1
; SHORT_FORWARD-NEXT: .LBB0_2:
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: test1:
+; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: bnez a2, .LBB0_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB0_2:
+; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = xor i32 %x, %y
%b = select i1 %c, i32 %a, i32 %x
@@ -73,6 +83,14 @@ define signext i32 @test2(i32 signext %x, i32 signext %y, i32 signext %z) {
; SHORT_FORWARD-NEXT: xor a0, a0, a1
; SHORT_FORWARD-NEXT: .LBB1_2:
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: test2:
+; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: beqz a2, .LBB1_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB1_2:
+; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = xor i32 %x, %y
%b = select i1 %c, i32 %x, i32 %a
@@ -120,6 +138,19 @@ define signext i32 @test3(i32 signext %v, i32 signext %w, i32 signext %x, i32 si
; SHORT_FORWARD-NEXT: .LBB2_4:
; SHORT_FORWARD-NEXT: addw a0, a0, a2
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: test3:
+; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: beqz a4, .LBB2_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB2_2:
+; RV32IXQCI-NEXT: beqz a4, .LBB2_4
+; RV32IXQCI-NEXT: # %bb.3:
+; RV32IXQCI-NEXT: xor a2, a2, a3
+; RV32IXQCI-NEXT: .LBB2_4:
+; RV32IXQCI-NEXT: add a0, a0, a2
+; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = xor i32 %v, %w
%b = select i1 %c, i32 %v, i32 %a
@@ -167,6 +198,12 @@ define signext i32 @test4(i32 signext %x, i32 signext %y, i32 signext %z) {
; SFB-ZICOND-NEXT: li a0, 3
; SFB-ZICOND-NEXT: czero.nez a0, a0, a2
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: test4:
+; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: li a0, 0
+; RV32IXQCI-NEXT: qc.lieqi a0, a2, 0, 3
+; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = select i1 %c, i32 3, i32 0
ret i32 %a
@@ -199,6 +236,15 @@ define i16 @select_xor_1(i16 %A, i8 %cond) {
; SHORT_FORWARD-NEXT: xori a0, a0, 43
; SHORT_FORWARD-NEXT: .LBB4_2: # %entry
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_xor_1:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a1, a1, 1
+; RV32IXQCI-NEXT: beqz a1, .LBB4_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB4_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
@@ -236,6 +282,15 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) {
; SHORT_FORWARD-NEXT: xori a0, a0, 43
; SHORT_FORWARD-NEXT: .LBB5_2: # %entry
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_xor_1b:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a1, a1, 1
+; RV32IXQCI-NEXT: beqz a1, .LBB5_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB5_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
@@ -289,6 +344,15 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
; SFB-ZICOND-NEXT: xor a0, a1, a0
; SFB-ZICOND-NEXT: .LBB6_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_xor_2:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB6_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB6_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
@@ -344,6 +408,15 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
; SFB-ZICOND-NEXT: xor a0, a1, a0
; SFB-ZICOND-NEXT: .LBB7_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_xor_2b:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB7_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB7_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
@@ -397,6 +470,15 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
; SFB-ZICOND-NEXT: or a0, a1, a0
; SFB-ZICOND-NEXT: .LBB8_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_or:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB8_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB8_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
@@ -452,6 +534,15 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
; SFB-ZICOND-NEXT: or a0, a1, a0
; SFB-ZICOND-NEXT: .LBB9_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_or_b:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB9_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB9_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
@@ -505,6 +596,15 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
; SFB-ZICOND-NEXT: or a0, a1, a0
; SFB-ZICOND-NEXT: .LBB10_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_or_1:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB10_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB10_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
%cmp10 = icmp eq i32 %and, 0
@@ -560,6 +660,15 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
; SFB-ZICOND-NEXT: or a0, a1, a0
; SFB-ZICOND-NEXT: .LBB11_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_or_1b:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB11_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB11_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
%cmp10 = icmp ne i32 %and, 1
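The new RV32IXQCI run line mostly reproduces the short-forward-branch output, but test4 shows the interesting case: a select between two constants collapses to a single conditional load immediate. From the emitted sequence, qc.lieqi rd, rs1, imm, simm appears to write simm to rd when rs1 == imm and leave rd unchanged otherwise; that reading is inferred from this test, not from the Xqci spec. A sketch mirroring test4 (hypothetical function name):

define signext i32 @select_const(i32 signext %x, i32 signext %y, i32 signext %z) {
  %c = icmp eq i32 %z, 0
  %a = select i1 %c, i32 3, i32 0
  ret i32 %a
}
; RV32IXQCI:
;   li        a0, 0          ; materialize the "else" value
;   qc.lieqi  a0, a2, 0, 3   ; if (a2 == 0) a0 = 3
;   ret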
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
index 9173fa4..cc1282a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -1666,20 +1666,20 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_32: # %else114
; CHECK-RV32-NEXT: slli a2, a3, 1
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vsrl.vx v16, v0, a1
+; CHECK-RV32-NEXT: vsrl.vx v24, v0, a1
; CHECK-RV32-NEXT: bgez a2, .LBB61_34
; CHECK-RV32-NEXT: # %bb.33: # %cond.load117
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vmv8r.v v24, v8
+; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a2
; CHECK-RV32-NEXT: vsetivli zero, 31, e8, m1, tu, ma
; CHECK-RV32-NEXT: vslideup.vi v8, v9, 30
; CHECK-RV32-NEXT: addi a0, a0, 1
-; CHECK-RV32-NEXT: vmv1r.v v24, v8
-; CHECK-RV32-NEXT: vmv8r.v v8, v24
+; CHECK-RV32-NEXT: vmv1r.v v16, v8
+; CHECK-RV32-NEXT: vmv8r.v v8, v16
; CHECK-RV32-NEXT: .LBB61_34: # %else118
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vmv.x.s a2, v16
+; CHECK-RV32-NEXT: vmv.x.s a2, v24
; CHECK-RV32-NEXT: bgez a3, .LBB61_35
; CHECK-RV32-NEXT: j .LBB61_572
; CHECK-RV32-NEXT: .LBB61_35: # %else122
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 1d691b1..a2fcd79 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -661,8 +661,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v16, v8, v0
; RV32-NEXT: csrr a7, vlenb
-; RV32-NEXT: li t3, 36
-; RV32-NEXT: mul a7, a7, t3
+; RV32-NEXT: slli a7, a7, 5
; RV32-NEXT: add a7, sp, a7
; RV32-NEXT: addi a7, a7, 16
; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill
@@ -682,7 +681,11 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vl8r.v v8, (t1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vmerge.vvm v8, v24, v8, v0
-; RV32-NEXT: addi t1, sp, 16
+; RV32-NEXT: csrr t1, vlenb
+; RV32-NEXT: li t2, 44
+; RV32-NEXT: mul t1, t1, t2
+; RV32-NEXT: add t1, sp, t1
+; RV32-NEXT: addi t1, t1, 16
; RV32-NEXT: vs4r.v v8, (t1) # vscale x 32-byte Folded Spill
; RV32-NEXT: vmv.s.x v0, a7
; RV32-NEXT: addi a3, a3, 12
@@ -694,8 +697,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v16, v24, v0
; RV32-NEXT: csrr a7, vlenb
-; RV32-NEXT: li t1, 20
-; RV32-NEXT: mul a7, a7, t1
+; RV32-NEXT: slli a7, a7, 4
; RV32-NEXT: add a7, sp, a7
; RV32-NEXT: addi a7, a7, 16
; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill
@@ -733,7 +735,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: csrr a7, vlenb
-; RV32-NEXT: li t0, 28
+; RV32-NEXT: li t0, 24
; RV32-NEXT: mul a7, a7, t0
; RV32-NEXT: add a7, sp, a7
; RV32-NEXT: addi a7, a7, 16
@@ -755,7 +757,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vmerge.vvm v8, v24, v8, v0
; RV32-NEXT: csrr a6, vlenb
-; RV32-NEXT: li a7, 44
+; RV32-NEXT: li a7, 40
; RV32-NEXT: mul a6, a6, a7
; RV32-NEXT: add a6, sp, a6
; RV32-NEXT: addi a6, a6, 16
@@ -772,24 +774,19 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a4, 12
-; RV32-NEXT: mul a1, a1, a4
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; RV32-NEXT: vmv.s.x v0, a3
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 36
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vrgatherei16.vv v24, v8, v6
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 2
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a3, 92
@@ -812,8 +809,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 20
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
@@ -835,12 +831,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 84
-; RV32-NEXT: mul a1, a1, a2
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 72
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
@@ -860,30 +850,36 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs4r.v v28, (a1) # vscale x 32-byte Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 60
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vmv.v.v v20, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: li a2, 60
+; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
-; RV32-NEXT: vmv.v.v v16, v8
+; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 60
+; RV32-NEXT: li a2, 44
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v16, (a1) # vscale x 32-byte Folded Spill
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl4r.v v8, (a1) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vrgatherei16.vv v28, v8, v3
+; RV32-NEXT: vrgatherei16.vv v20, v16, v3
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v28, v24
+; RV32-NEXT: vmv.v.v v20, v24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 6
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI27_4)
; RV32-NEXT: addi a1, a1, %lo(.LCPI27_4)
; RV32-NEXT: lui a2, %hi(.LCPI27_5)
@@ -891,13 +887,25 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v24, (a2)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vle16.v v16, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 84
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v16, (a1) # vscale x 8-byte Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI27_7)
; RV32-NEXT: addi a1, a1, %lo(.LCPI27_7)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vle16.v v10, (a1)
+; RV32-NEXT: vle16.v v16, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 28
+; RV32-NEXT: li a2, 76
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs2r.v v16, (a1) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 24
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -909,18 +917,29 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 84
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v7, (a1) # vscale x 8-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vrgatherei16.vv v24, v20, v8
+; RV32-NEXT: vrgatherei16.vv v24, v20, v7
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v24, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 12
-; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 76
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl2r.v v28, (a1) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vrgatherei16.vv v16, v0, v10
+; RV32-NEXT: vrgatherei16.vv v16, v0, v28
; RV32-NEXT: lui a1, %hi(.LCPI27_6)
; RV32-NEXT: addi a1, a1, %lo(.LCPI27_6)
; RV32-NEXT: lui a2, %hi(.LCPI27_8)
@@ -934,7 +953,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vle16.v v5, (a2)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 44
+; RV32-NEXT: li a2, 40
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -942,12 +961,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vrgatherei16.vv v0, v20, v4
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v0, v16
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 84
-; RV32-NEXT: mul a1, a1, a2
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vrgatherei16.vv v16, v8, v6
; RV32-NEXT: csrr a1, vlenb
@@ -968,7 +981,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a0, 192
; RV32-NEXT: vse32.v v24, (a1)
; RV32-NEXT: addi a1, a0, 128
-; RV32-NEXT: vse32.v v28, (a1)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 6
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl4r.v v8, (a2) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: addi a1, a0, 64
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 60
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
index d995a31..acc6849 100644
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -416,14 +416,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0
+; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16
-; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16
-; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16
-; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16
+; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16
+; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16
+; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16
+; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-UNKNOWN-NEXT: ret
;
; RV32-BITS-256-LABEL: reverse_nxv32i1:
@@ -437,14 +437,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0
+; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-BITS-256-NEXT: vrgather.vv v11, v12, v16
-; RV32-BITS-256-NEXT: vrgather.vv v10, v13, v16
-; RV32-BITS-256-NEXT: vrgather.vv v9, v14, v16
-; RV32-BITS-256-NEXT: vrgather.vv v8, v15, v16
+; RV32-BITS-256-NEXT: vrgather.vv v15, v8, v16
+; RV32-BITS-256-NEXT: vrgather.vv v14, v9, v16
+; RV32-BITS-256-NEXT: vrgather.vv v13, v10, v16
+; RV32-BITS-256-NEXT: vrgather.vv v12, v11, v16
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-256-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-256-NEXT: ret
;
; RV32-BITS-512-LABEL: reverse_nxv32i1:
@@ -458,14 +458,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0
+; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-BITS-512-NEXT: vrgather.vv v11, v12, v16
-; RV32-BITS-512-NEXT: vrgather.vv v10, v13, v16
-; RV32-BITS-512-NEXT: vrgather.vv v9, v14, v16
-; RV32-BITS-512-NEXT: vrgather.vv v8, v15, v16
+; RV32-BITS-512-NEXT: vrgather.vv v15, v8, v16
+; RV32-BITS-512-NEXT: vrgather.vv v14, v9, v16
+; RV32-BITS-512-NEXT: vrgather.vv v13, v10, v16
+; RV32-BITS-512-NEXT: vrgather.vv v12, v11, v16
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-512-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-512-NEXT: ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1:
@@ -479,14 +479,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0
+; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16
-; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16
-; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16
-; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16
+; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16
+; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16
+; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16
+; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-UNKNOWN-NEXT: ret
;
; RV64-BITS-256-LABEL: reverse_nxv32i1:
@@ -500,14 +500,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0
+; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-BITS-256-NEXT: vrgather.vv v11, v12, v16
-; RV64-BITS-256-NEXT: vrgather.vv v10, v13, v16
-; RV64-BITS-256-NEXT: vrgather.vv v9, v14, v16
-; RV64-BITS-256-NEXT: vrgather.vv v8, v15, v16
+; RV64-BITS-256-NEXT: vrgather.vv v15, v8, v16
+; RV64-BITS-256-NEXT: vrgather.vv v14, v9, v16
+; RV64-BITS-256-NEXT: vrgather.vv v13, v10, v16
+; RV64-BITS-256-NEXT: vrgather.vv v12, v11, v16
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-256-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-256-NEXT: ret
;
; RV64-BITS-512-LABEL: reverse_nxv32i1:
@@ -521,14 +521,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0
+; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-BITS-512-NEXT: vrgather.vv v11, v12, v16
-; RV64-BITS-512-NEXT: vrgather.vv v10, v13, v16
-; RV64-BITS-512-NEXT: vrgather.vv v9, v14, v16
-; RV64-BITS-512-NEXT: vrgather.vv v8, v15, v16
+; RV64-BITS-512-NEXT: vrgather.vv v15, v8, v16
+; RV64-BITS-512-NEXT: vrgather.vv v14, v9, v16
+; RV64-BITS-512-NEXT: vrgather.vv v13, v10, v16
+; RV64-BITS-512-NEXT: vrgather.vv v12, v11, v16
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-512-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-512-NEXT: ret
%res = call <vscale x 32 x i1> @llvm.vector.reverse.nxv32i1(<vscale x 32 x i1> %a)
ret <vscale x 32 x i1> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
index 4bc6313..1ee7e13 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
@@ -37772,18 +37772,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_P1(<vscale x 64 x i8> %val, <vs
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_P1:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB915_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB915_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB915_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -37791,11 +37791,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_P1(<vscale x 64 x i8> %val, <vs
; CHECK-RV32VC-NEXT: .LBB915_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
@@ -38397,18 +38397,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_PALL(<vscale x 64 x i8> %val, <
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_PALL:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB916_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB916_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB916_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -38416,11 +38416,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_PALL(<vscale x 64 x i8> %val, <
; CHECK-RV32VC-NEXT: .LBB916_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
@@ -39022,18 +39022,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_S1(<vscale x 64 x i8> %val, <vs
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_S1:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB917_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB917_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB917_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -39041,11 +39041,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_S1(<vscale x 64 x i8> %val, <vs
; CHECK-RV32VC-NEXT: .LBB917_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
@@ -39647,18 +39647,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_ALL(<vscale x 64 x i8> %val, <v
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_ALL:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB918_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB918_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB918_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -39666,11 +39666,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_ALL(<vscale x 64 x i8> %val, <v
; CHECK-RV32VC-NEXT: .LBB918_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
@@ -40271,18 +40271,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_DEFAULT(<vscale x 64 x i8> %val
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_DEFAULT:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB919_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB919_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB919_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -40290,11 +40290,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_DEFAULT(<vscale x 64 x i8> %val
; CHECK-RV32VC-NEXT: .LBB919_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
index 06d54fa..95bff27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/remat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -301,3 +301,135 @@ define void @vfmv.s.f(ptr %p, double %x) {
store volatile double %x, ptr %p
ret void
}
+
+; This test is fairly fragile, but it's trying to cover the case which
+; caused the revert of bba9172 due to an interaction with how rematerialized
+; instructions are pruned from the original live interval. In the result
+; below, we remat the vmv.v.x into the loop, but fail to remat the vmv.v.x
+; a second time after further splitting its live range. We shouldn't need
+; to spill it to the stack at all.
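+;
+; As a rough sketch of the distinction (illustrative only, not part of the
+; checked output): rematerialization recomputes a cheap value at its use,
+;   vmv.v.x v8, a0                  # recompute the splat directly
+; instead of round-tripping it through the stack,
+;   vs8r.v v8, (a4)                 # spill ...
+;   vl8r.v v8, (a4)                 # ... and reload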
+define i64 @dual_remat(i64 %0, <vscale x 16 x i64> %1, <vscale x 16 x i64> %2, ptr %p) #0 {
+; CHECK-LABEL: dual_remat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a2, a1, 5
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x21, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 33 * vlenb
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a1, a2, 3
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vmv.v.i v0, 0
+; CHECK-NEXT: .LBB8_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: slli a4, a4, 3
+; CHECK-NEXT: add a5, a5, a4
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, a4, a5
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a5, a4, 4
+; CHECK-NEXT: add a4, a5, a4
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: slli a4, a4, 3
+; CHECK-NEXT: add a5, a5, a4
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, a4, a5
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vand.vv v16, v16, v8
+; CHECK-NEXT: vmsne.vi v24, v16, 0
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 4
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs1r.v v24, (a4) # vscale x 8-byte Folded Spill
+; CHECK-NEXT: vand.vv v16, v0, v8
+; CHECK-NEXT: vmsne.vi v8, v16, 0
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: slli a4, a4, 3
+; CHECK-NEXT: add a5, a5, a4
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, a4, a5
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 4
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl1r.v v9, (a4) # vscale x 8-byte Folded Reload
+; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a1
+; CHECK-NEXT: vsetvli a4, zero, e8, m2, ta, ma
+; CHECK-NEXT: vcpop.m a4, v9
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a6, a5, 4
+; CHECK-NEXT: add a5, a6, a5
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vs8r.v v8, (a3)
+; CHECK-NEXT: vs8r.v v8, (a2)
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; CHECK-NEXT: vor.vv v16, v16, v8
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 3
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vor.vv v0, v0, v8
+; CHECK-NEXT: beqz a4, .LBB8_1
+; CHECK-NEXT: # %bb.2: # %middle.block
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a2, a1, 5
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: add sp, sp, a1
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <vscale x 16 x i64> zeroinitializer, i64 %0, i64 0
+ %broadcast.splat = shufflevector <vscale x 16 x i64> %broadcast.splatinsert, <vscale x 16 x i64> zeroinitializer, <vscale x 16 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %vec.ind = phi <vscale x 16 x i64> [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ]
+ %3 = and <vscale x 16 x i64> %vec.ind, %broadcast.splat
+ %4 = icmp ne <vscale x 16 x i64> %3, zeroinitializer
+ store <vscale x 16 x i64> %broadcast.splat, ptr %p
+ %5 = tail call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %4)
+ %vec.ind.next = or <vscale x 16 x i64> %vec.ind, %1
+ br i1 %5, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+ %and.i = and i64 1, %0
+ ret i64 %and.i
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 02825b2..19a1841 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -6018,3 +6018,39 @@ vector.latch: ; preds = %for.body419
for.cond.cleanup: ; preds = %vector.latch
ret void
}
+
+;; This is exactly like sink_add_splat except that the splat has operands
+;; which haven't been converted to undef.
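+;;
+;; For reference, a canonicalized splat would use poison (or undef) rather
+;; than zeroinitializer for the unused operands (sketch, not checked output):
+;;   %ins = insertelement <4 x i32> poison, i32 %x, i32 0
+;;   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer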
+define void @sink_non_canonical_splat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_non_canonical_splat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB131_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB131_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = add <4 x i32> %wide.load, %broadcast.splat
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/select-bare.ll b/llvm/test/CodeGen/RISCV/select-bare.ll
index 44028a7..550eb94 100644
--- a/llvm/test/CodeGen/RISCV/select-bare.ll
+++ b/llvm/test/CodeGen/RISCV/select-bare.ll
@@ -3,7 +3,7 @@
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -mattr=+xmipscmov -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I-CCMOV %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll
index b57f625..95f5a9d 100644
--- a/llvm/test/CodeGen/RISCV/select-cc.ll
+++ b/llvm/test/CodeGen/RISCV/select-cc.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32I %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
; RUN: llc -mtriple=riscv64 -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64I %s
@@ -88,39 +88,38 @@ define signext i32 @foo(i32 signext %a, ptr %b) nounwind {
; RV32IXQCI-LABEL: foo:
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: lw a2, 0(a1)
-; RV32IXQCI-NEXT: lw a4, 0(a1)
-; RV32IXQCI-NEXT: lw t5, 0(a1)
-; RV32IXQCI-NEXT: lw t4, 0(a1)
-; RV32IXQCI-NEXT: lw t3, 0(a1)
-; RV32IXQCI-NEXT: lw t2, 0(a1)
-; RV32IXQCI-NEXT: lw t0, 0(a1)
-; RV32IXQCI-NEXT: lw a7, 0(a1)
-; RV32IXQCI-NEXT: lw a6, 0(a1)
; RV32IXQCI-NEXT: lw a3, 0(a1)
-; RV32IXQCI-NEXT: lw t1, 0(a1)
+; RV32IXQCI-NEXT: lw a4, 0(a1)
; RV32IXQCI-NEXT: lw a5, 0(a1)
-; RV32IXQCI-NEXT: bltz t1, .LBB0_2
+; RV32IXQCI-NEXT: qc.mvne a0, a0, a2, a2
+; RV32IXQCI-NEXT: qc.mveq a0, a0, a3, a3
+; RV32IXQCI-NEXT: lw a2, 0(a1)
+; RV32IXQCI-NEXT: qc.mvgeu a0, a4, a0, a4
+; RV32IXQCI-NEXT: lw a3, 0(a1)
+; RV32IXQCI-NEXT: qc.mvltu a0, a0, a5, a5
+; RV32IXQCI-NEXT: lw a4, 0(a1)
+; RV32IXQCI-NEXT: qc.mvgeu a0, a0, a2, a2
+; RV32IXQCI-NEXT: lw a2, 0(a1)
+; RV32IXQCI-NEXT: qc.mvltu a0, a3, a0, a3
+; RV32IXQCI-NEXT: lw a3, 0(a1)
+; RV32IXQCI-NEXT: qc.mvge a0, a4, a0, a4
+; RV32IXQCI-NEXT: lw a4, 0(a1)
+; RV32IXQCI-NEXT: qc.mvlt a0, a0, a2, a2
+; RV32IXQCI-NEXT: lw a2, 0(a1)
+; RV32IXQCI-NEXT: qc.mvge a0, a0, a3, a3
+; RV32IXQCI-NEXT: lw a3, 0(a1)
+; RV32IXQCI-NEXT: qc.mvlt a0, a4, a0, a4
+; RV32IXQCI-NEXT: lw a4, 0(a1)
+; RV32IXQCI-NEXT: lw a1, 0(a1)
+; RV32IXQCI-NEXT: blez a2, .LBB0_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: li a5, 0
-; RV32IXQCI-NEXT: qc.mveq a2, a0, a2, a0
-; RV32IXQCI-NEXT: qc.mvne a4, a2, a4, a2
-; RV32IXQCI-NEXT: qc.mvltu t5, t5, a4, a4
-; RV32IXQCI-NEXT: qc.mvgeu t4, t5, t4, t5
-; RV32IXQCI-NEXT: qc.mvltu t3, t4, t3, t4
-; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t3, t3
-; RV32IXQCI-NEXT: qc.mvlt t0, t0, t2, t2
-; RV32IXQCI-NEXT: qc.mvge a7, t0, a7, t0
-; RV32IXQCI-NEXT: qc.mvlt a6, a7, a6, a7
-; RV32IXQCI-NEXT: qc.mvge a3, a3, a6, a6
-; RV32IXQCI-NEXT: qc.mvlt a3, a5, t1, t1
-; RV32IXQCI-NEXT: mv a5, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: .LBB0_2:
-; RV32IXQCI-NEXT: lw a2, 0(a1)
-; RV32IXQCI-NEXT: lw a0, 0(a1)
-; RV32IXQCI-NEXT: li a1, 1024
-; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a5
-; RV32IXQCI-NEXT: li a1, 2046
-; RV32IXQCI-NEXT: qc.mvltu a0, a1, t1, a2
+; RV32IXQCI-NEXT: qc.mvlti a0, a2, 0, a3
+; RV32IXQCI-NEXT: li a3, 1024
+; RV32IXQCI-NEXT: qc.mvge a0, a3, a4, a4
+; RV32IXQCI-NEXT: li a3, 2046
+; RV32IXQCI-NEXT: qc.mvgeu a0, a3, a2, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: foo:
diff --git a/llvm/test/CodeGen/RISCV/select-cond.ll b/llvm/test/CodeGen/RISCV/select-cond.ll
index 3ca0f46..a3c48737 100644
--- a/llvm/test/CodeGen/RISCV/select-cond.ll
+++ b/llvm/test/CodeGen/RISCV/select-cond.ll
@@ -7,7 +7,7 @@
; RUN: | FileCheck %s --check-prefixes=RV32-XQCICM
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcics -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32-XQCICS
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV64
diff --git a/llvm/test/CodeGen/RISCV/select-const.ll b/llvm/test/CodeGen/RISCV/select-const.ll
index 65d10bb..dfac6e1 100644
--- a/llvm/test/CodeGen/RISCV/select-const.ll
+++ b/llvm/test/CodeGen/RISCV/select-const.ll
@@ -5,7 +5,7 @@
; RUN: | FileCheck -check-prefixes=RV32,RV32IF %s
; RUN: llc -mtriple=riscv32 -mattr=+zicond -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,RV32ZICOND %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
; RUN: llc -mtriple=riscv64 -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,RV64I %s
@@ -579,9 +579,9 @@ define i32 @select_slt_zero_constant1_constant2(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_slt_zero_constant1_constant2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 31
-; RV32IXQCI-NEXT: andi a0, a0, 10
-; RV32IXQCI-NEXT: addi a0, a0, -3
+; RV32IXQCI-NEXT: li a1, -3
+; RV32IXQCI-NEXT: qc.lilti a1, a0, 0, 7
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_slt_zero_constant1_constant2:
@@ -605,9 +605,9 @@ define i32 @select_sgt_negative_one_constant1_constant2(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_sgt_negative_one_constant1_constant2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 31
-; RV32IXQCI-NEXT: andi a0, a0, -10
-; RV32IXQCI-NEXT: addi a0, a0, 7
+; RV32IXQCI-NEXT: li a1, -3
+; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 7
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_sgt_negative_one_constant1_constant2:
@@ -653,12 +653,10 @@ define i32 @select_nonnegative_lui_addi(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_nonnegative_lui_addi:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: mv a1, a0
-; RV32IXQCI-NEXT: lui a0, 4
-; RV32IXQCI-NEXT: bgez a1, .LBB21_2
-; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: li a0, 25
-; RV32IXQCI-NEXT: .LBB21_2:
+; RV32IXQCI-NEXT: lui a2, 4
+; RV32IXQCI-NEXT: li a1, 25
+; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: select_nonnegative_lui_addi:
@@ -726,12 +724,10 @@ define i32 @select_nonnegative_lui_addi_swapped(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_nonnegative_lui_addi_swapped:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bgez a0, .LBB22_2
-; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 4
-; RV32IXQCI-NEXT: ret
-; RV32IXQCI-NEXT: .LBB22_2:
-; RV32IXQCI-NEXT: li a0, 25
+; RV32IXQCI-NEXT: li a2, 25
+; RV32IXQCI-NEXT: lui a1, 4
+; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: select_nonnegative_lui_addi_swapped:
@@ -801,13 +797,13 @@ define i32 @diff_shl_addi(i32 signext %x) {
;
; RV32IXQCI-LABEL: diff_shl_addi:
; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: lui a2, 4
+; RV32IXQCI-NEXT: li a1, 25
; RV32IXQCI-NEXT: bgez a0, .LBB23_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 4
-; RV32IXQCI-NEXT: addi a0, a0, 25
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: addi a1, a2, 25
; RV32IXQCI-NEXT: .LBB23_2:
-; RV32IXQCI-NEXT: li a0, 25
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: diff_shl_addi:
@@ -876,13 +872,13 @@ define i32 @diff_shl_addi2(i32 signext %x) {
;
; RV32IXQCI-LABEL: diff_shl_addi2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bgez a0, .LBB24_2
+; RV32IXQCI-NEXT: lui a2, 4
+; RV32IXQCI-NEXT: li a1, 25
+; RV32IXQCI-NEXT: bltz a0, .LBB24_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: li a0, 25
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: addi a1, a2, 25
; RV32IXQCI-NEXT: .LBB24_2:
-; RV32IXQCI-NEXT: lui a0, 4
-; RV32IXQCI-NEXT: addi a0, a0, 25
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: diff_shl_addi2:
@@ -929,9 +925,10 @@ define i32 @diff_pow2_24_16(i32 signext %x) {
;
; RV32IXQCI-LABEL: diff_pow2_24_16:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 31
-; RV32IXQCI-NEXT: andi a0, a0, -8
-; RV32IXQCI-NEXT: addi a0, a0, 24
+; RV32IXQCI-NEXT: li a2, 24
+; RV32IXQCI-NEXT: li a1, 16
+; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: diff_pow2_24_16:
@@ -955,9 +952,10 @@ define i32 @diff_pow2_16_24(i32 signext %x) {
;
; RV32IXQCI-LABEL: diff_pow2_16_24:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srli a0, a0, 28
-; RV32IXQCI-NEXT: andi a0, a0, 8
-; RV32IXQCI-NEXT: addi a0, a0, 16
+; RV32IXQCI-NEXT: li a2, 16
+; RV32IXQCI-NEXT: li a1, 24
+; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: diff_pow2_16_24:
@@ -1008,14 +1006,14 @@ define i32 @zext_or_constant(i32 signext %x) {
;
; RV32IXQCI-LABEL: zext_or_constant:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bgez a0, .LBB27_2
+; RV32IXQCI-NEXT: srli a2, a0, 31
+; RV32IXQCI-NEXT: lui a1, 140
+; RV32IXQCI-NEXT: addi a1, a1, 417
+; RV32IXQCI-NEXT: bltz a0, .LBB27_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 140
-; RV32IXQCI-NEXT: addi a0, a0, 417
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: xori a1, a2, 1
; RV32IXQCI-NEXT: .LBB27_2:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: xori a0, a0, 1
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: zext_or_constant:
@@ -1095,14 +1093,14 @@ define i32 @zext_or_constant2(i32 signext %x) {
;
; RV32IXQCI-LABEL: zext_or_constant2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bltz a0, .LBB28_2
+; RV32IXQCI-NEXT: srli a2, a0, 31
+; RV32IXQCI-NEXT: lui a1, 140
+; RV32IXQCI-NEXT: addi a1, a1, 417
+; RV32IXQCI-NEXT: bgez a0, .LBB28_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 140
-; RV32IXQCI-NEXT: addi a0, a0, 417
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: xori a1, a2, 1
; RV32IXQCI-NEXT: .LBB28_2:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: xori a0, a0, 1
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: zext_or_constant2:
@@ -1183,14 +1181,14 @@ define i32 @sext_or_constant(i32 signext %x) {
;
; RV32IXQCI-LABEL: sext_or_constant:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bgez a0, .LBB29_2
+; RV32IXQCI-NEXT: srli a2, a0, 31
+; RV32IXQCI-NEXT: lui a1, 140
+; RV32IXQCI-NEXT: addi a1, a1, 417
+; RV32IXQCI-NEXT: bltz a0, .LBB29_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 140
-; RV32IXQCI-NEXT: addi a0, a0, 417
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: addi a1, a2, -1
; RV32IXQCI-NEXT: .LBB29_2:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: addi a0, a0, -1
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: sext_or_constant:
@@ -1271,14 +1269,14 @@ define i32 @sext_or_constant2(i32 signext %x) {
;
; RV32IXQCI-LABEL: sext_or_constant2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bltz a0, .LBB30_2
+; RV32IXQCI-NEXT: srli a2, a0, 31
+; RV32IXQCI-NEXT: lui a1, 140
+; RV32IXQCI-NEXT: addi a1, a1, 417
+; RV32IXQCI-NEXT: bgez a0, .LBB30_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 140
-; RV32IXQCI-NEXT: addi a0, a0, 417
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: addi a1, a2, -1
; RV32IXQCI-NEXT: .LBB30_2:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: addi a0, a0, -1
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: sext_or_constant2:
@@ -1332,9 +1330,9 @@ define i32 @select_0_6(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_0_6:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 2
-; RV32IXQCI-NEXT: srli a0, a0, 30
-; RV32IXQCI-NEXT: slli a0, a0, 1
+; RV32IXQCI-NEXT: li a1, 6
+; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 0
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_0_6:
@@ -1358,9 +1356,9 @@ define i32 @select_6_0(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_6_0:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: andi a0, a0, 6
+; RV32IXQCI-NEXT: li a1, 0
+; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 6
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_6_0:
@@ -1383,8 +1381,9 @@ define i32 @select_0_394(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_0_394:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 31
-; RV32IXQCI-NEXT: andi a0, a0, 394
+; RV32IXQCI-NEXT: li a1, 394
+; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 0
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_0_394:
@@ -1407,9 +1406,9 @@ define i32 @select_394_0(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_394_0:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: andi a0, a0, 394
+; RV32IXQCI-NEXT: li a1, 394
+; RV32IXQCI-NEXT: qc.lilti a1, a0, 0, 0
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_394_0:
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 8273c65..1eb47e4c 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -4,7 +4,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+m,+xventanacondops -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64IMXVTCONDOPS %s
; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECKZICOND,RV32IMZICOND %s
; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECKZICOND,RV64IMZICOND %s
-; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i16 @select_xor_1(i16 %A, i8 %cond) {
@@ -44,10 +44,11 @@ define i16 @select_xor_1(i16 %A, i8 %cond) {
;
; RV32IXQCI-LABEL: select_xor_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a1, a1, 31
-; RV32IXQCI-NEXT: srai a1, a1, 31
-; RV32IXQCI-NEXT: andi a1, a1, 43
-; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: andi a1, a1, 1
+; RV32IXQCI-NEXT: beqz a1, .LBB0_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB0_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -102,10 +103,11 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) {
;
; RV32IXQCI-LABEL: select_xor_1b:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a1, a1, 31
-; RV32IXQCI-NEXT: srai a1, a1, 31
-; RV32IXQCI-NEXT: andi a1, a1, 43
-; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: andi a1, a1, 1
+; RV32IXQCI-NEXT: beqz a1, .LBB1_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB1_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -148,10 +150,11 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
;
; RV32IXQCI-LABEL: select_xor_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB2_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB2_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -196,10 +199,11 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
;
; RV32IXQCI-LABEL: select_xor_2b:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB3_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB3_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -221,9 +225,10 @@ define i16 @select_xor_3(i16 %A, i8 %cond) {
; RV32IXQCI-LABEL: select_xor_3:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a1, a1, 1
-; RV32IXQCI-NEXT: addi a1, a1, -1
-; RV32IXQCI-NEXT: andi a1, a1, 43
-; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: bnez a1, .LBB4_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB4_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -247,9 +252,10 @@ define i16 @select_xor_3b(i16 %A, i8 %cond) {
; RV32IXQCI-LABEL: select_xor_3b:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a1, a1, 1
-; RV32IXQCI-NEXT: addi a1, a1, -1
-; RV32IXQCI-NEXT: andi a1, a1, 43
-; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: bnez a1, .LBB5_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB5_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -293,9 +299,10 @@ define i32 @select_xor_4(i32 %A, i32 %B, i8 %cond) {
; RV32IXQCI-LABEL: select_xor_4:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB6_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB6_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -341,9 +348,10 @@ define i32 @select_xor_4b(i32 %A, i32 %B, i8 %cond) {
; RV32IXQCI-LABEL: select_xor_4b:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB7_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB7_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -382,9 +390,12 @@ define i32 @select_xor_5(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_xor_5:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a1
-; RV32IXQCI-NEXT: xori a0, a0, 128
+; RV32IXQCI-NEXT: li a2, 128
+; RV32IXQCI-NEXT: bnez a0, .LBB8_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: xori a2, a1, 128
+; RV32IXQCI-NEXT: .LBB8_2:
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
%add = xor i32 %x, 128
%sel = select i1 %cond, i32 128, i32 %add
@@ -424,10 +435,11 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
;
; RV32IXQCI-LABEL: select_or:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB9_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB9_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -472,10 +484,11 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
;
; RV32IXQCI-LABEL: select_or_b:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB10_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB10_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -518,10 +531,11 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
;
; RV32IXQCI-LABEL: select_or_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB11_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB11_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
@@ -566,10 +580,11 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
;
; RV32IXQCI-LABEL: select_or_1b:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB12_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB12_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
@@ -613,9 +628,10 @@ define i32 @select_or_2(i32 %A, i32 %B, i8 %cond) {
; RV32IXQCI-LABEL: select_or_2:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB13_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB13_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -661,9 +677,10 @@ define i32 @select_or_2b(i32 %A, i32 %B, i8 %cond) {
; RV32IXQCI-LABEL: select_or_2b:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB14_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB14_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -707,9 +724,10 @@ define i32 @select_or_3(i32 %A, i32 %B, i32 %cond) {
; RV32IXQCI-LABEL: select_or_3:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB15_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB15_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
@@ -755,9 +773,10 @@ define i32 @select_or_3b(i32 %A, i32 %B, i32 %cond) {
; RV32IXQCI-LABEL: select_or_3b:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB16_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB16_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
@@ -796,9 +815,12 @@ define i32 @select_or_4(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_or_4:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a1
-; RV32IXQCI-NEXT: ori a0, a0, 128
+; RV32IXQCI-NEXT: li a2, 128
+; RV32IXQCI-NEXT: bnez a0, .LBB17_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: ori a2, a1, 128
+; RV32IXQCI-NEXT: .LBB17_2:
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
%add = or i32 %x, 128
%sel = select i1 %cond, i32 128, i32 %add
@@ -840,9 +862,11 @@ define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_add_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: neg a0, a0
-; RV32IXQCI-NEXT: and a0, a0, a1
-; RV32IXQCI-NEXT: add a0, a0, a2
+; RV32IXQCI-NEXT: beqz a0, .LBB18_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: add a2, a2, a1
+; RV32IXQCI-NEXT: .LBB18_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = add i32 %a, %b
@@ -885,9 +909,11 @@ define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_add_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: add a0, a0, a1
+; RV32IXQCI-NEXT: bnez a0, .LBB19_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: add a1, a1, a2
+; RV32IXQCI-NEXT: .LBB19_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = add i32 %a, %b
@@ -933,9 +959,11 @@ define i32 @select_add_3(i1 zeroext %cond, i32 %a) {
;
; RV32IXQCI-LABEL: select_add_3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: andi a0, a0, 42
-; RV32IXQCI-NEXT: add a0, a0, a1
+; RV32IXQCI-NEXT: bnez a0, .LBB20_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: addi a1, a1, 42
+; RV32IXQCI-NEXT: .LBB20_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = add i32 %a, 42
@@ -978,9 +1006,12 @@ define i32 @select_add_4(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_add_4:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a1
-; RV32IXQCI-NEXT: addi a0, a0, 128
+; RV32IXQCI-NEXT: li a2, 128
+; RV32IXQCI-NEXT: bnez a0, .LBB21_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: addi a2, a1, 128
+; RV32IXQCI-NEXT: .LBB21_2:
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
%add = add i32 %x, 128
%sel = select i1 %cond, i32 128, i32 %add
@@ -1029,12 +1060,14 @@ define i64 @select_add_5(i1 zeroext %cond, i64 %x) {
;
; RV32IXQCI-LABEL: select_add_5:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a3, a0, -1
-; RV32IXQCI-NEXT: and a1, a1, a3
-; RV32IXQCI-NEXT: addi a0, a1, 128
-; RV32IXQCI-NEXT: sltu a1, a0, a1
-; RV32IXQCI-NEXT: and a2, a2, a3
-; RV32IXQCI-NEXT: add a1, a1, a2
+; RV32IXQCI-NEXT: mv a3, a0
+; RV32IXQCI-NEXT: addi a4, a1, 128
+; RV32IXQCI-NEXT: sltu a0, a4, a1
+; RV32IXQCI-NEXT: add a2, a2, a0
+; RV32IXQCI-NEXT: li a0, 128
+; RV32IXQCI-NEXT: qc.mveqi a0, a3, 0, a4
+; RV32IXQCI-NEXT: qc.selectieqi a3, 0, a2, 0
+; RV32IXQCI-NEXT: mv a1, a3
; RV32IXQCI-NEXT: ret
%add = add i64 %x, 128
%sel = select i1 %cond, i64 128, i64 %add
@@ -1093,14 +1126,15 @@ define i64 @select_add_6(i1 zeroext %cond, i64 %x) {
;
; RV32IXQCI-LABEL: select_add_6:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a3, a0, -1
+; RV32IXQCI-NEXT: mv a3, a0
; RV32IXQCI-NEXT: lui a0, 14
-; RV32IXQCI-NEXT: and a1, a1, a3
-; RV32IXQCI-NEXT: addi a0, a0, 1005
-; RV32IXQCI-NEXT: add a0, a0, a1
+; RV32IXQCI-NEXT: addi a4, a0, 1005
+; RV32IXQCI-NEXT: add a0, a1, a4
; RV32IXQCI-NEXT: sltu a1, a0, a1
-; RV32IXQCI-NEXT: and a2, a2, a3
; RV32IXQCI-NEXT: add a1, a1, a2
+; RV32IXQCI-NEXT: qc.mvnei a0, a3, 0, a4
+; RV32IXQCI-NEXT: qc.selectieqi a3, 0, a1, 0
+; RV32IXQCI-NEXT: mv a1, a3
; RV32IXQCI-NEXT: ret
%add = add i64 %x, 58349
%sel = select i1 %cond, i64 58349, i64 %add
@@ -1152,9 +1186,11 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_sub_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: sub a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB24_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sub a2, a1, a2
+; RV32IXQCI-NEXT: .LBB24_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = sub i32 %a, %b
@@ -1197,9 +1233,11 @@ define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_sub_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: sub a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB25_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sub a1, a1, a2
+; RV32IXQCI-NEXT: .LBB25_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = sub i32 %a, %b
@@ -1245,9 +1283,11 @@ define i32 @select_sub_3(i1 zeroext %cond, i32 %a) {
;
; RV32IXQCI-LABEL: select_sub_3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: andi a0, a0, 42
-; RV32IXQCI-NEXT: sub a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB26_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: addi a1, a1, -42
+; RV32IXQCI-NEXT: .LBB26_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = sub i32 %a, 42
@@ -1301,10 +1341,12 @@ define i32 @select_sub_4(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_sub_4:
; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: li a2, 128
+; RV32IXQCI-NEXT: bnez a0, .LBB27_2
+; RV32IXQCI-NEXT: # %bb.1:
; RV32IXQCI-NEXT: addi a2, a1, -128
-; RV32IXQCI-NEXT: li a1, 128
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: .LBB27_2:
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
%add = sub i32 %x, 128
%sel = select i1 %cond, i32 128, i32 %add
@@ -1347,9 +1389,11 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_and_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: and a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB28_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: and a2, a2, a1
+; RV32IXQCI-NEXT: .LBB28_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = and i32 %a, %b
@@ -1392,9 +1436,11 @@ define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_and_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: and a2, a2, a1
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: bnez a0, .LBB29_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: .LBB29_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = and i32 %a, %b
@@ -1437,9 +1483,11 @@ define i32 @select_and_3(i1 zeroext %cond, i32 %a) {
;
; RV32IXQCI-LABEL: select_and_3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: andi a2, a1, 42
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: bnez a0, .LBB30_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: andi a1, a1, 42
+; RV32IXQCI-NEXT: .LBB30_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = and i32 %a, 42
@@ -1626,9 +1674,11 @@ define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) {
; RV32IXQCI-NEXT: lui a3, 199729
; RV32IXQCI-NEXT: addi a3, a3, -975
; RV32IXQCI-NEXT: mulhu a2, a2, a3
-; RV32IXQCI-NEXT: srli a2, a2, 2
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: bnez a0, .LBB33_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: srli a1, a2, 2
+; RV32IXQCI-NEXT: .LBB33_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = udiv i32 %a, 42
@@ -1681,9 +1731,11 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_shl_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: sll a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB34_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sll a2, a1, a2
+; RV32IXQCI-NEXT: .LBB34_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = shl i32 %a, %b
@@ -1726,9 +1778,11 @@ define i32 @select_shl_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_shl_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: sll a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB35_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sll a1, a1, a2
+; RV32IXQCI-NEXT: .LBB35_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = shl i32 %a, %b
@@ -1797,9 +1851,11 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_ashr_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: sra a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB37_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sra a2, a1, a2
+; RV32IXQCI-NEXT: .LBB37_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = ashr i32 %a, %b
@@ -1842,9 +1898,11 @@ define i32 @select_ashr_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_ashr_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: sra a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB38_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sra a1, a1, a2
+; RV32IXQCI-NEXT: .LBB38_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = ashr i32 %a, %b
@@ -1913,9 +1971,11 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_lshr_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: srl a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB40_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: srl a2, a1, a2
+; RV32IXQCI-NEXT: .LBB40_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = lshr i32 %a, %b
@@ -1958,9 +2018,11 @@ define i32 @select_lshr_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_lshr_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: srl a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB41_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: srl a1, a1, a2
+; RV32IXQCI-NEXT: .LBB41_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = lshr i32 %a, %b
@@ -2304,11 +2366,13 @@ define i32 @select_cst3(i1 zeroext %cond) {
;
; RV32IXQCI-LABEL: select_cst3:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: lui a1, 7
-; RV32IXQCI-NEXT: lui a2, 5
-; RV32IXQCI-NEXT: addi a3, a1, 1328
-; RV32IXQCI-NEXT: addi a1, a2, -480
-; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a3
+; RV32IXQCI-NEXT: lui a2, 7
+; RV32IXQCI-NEXT: lui a1, 5
+; RV32IXQCI-NEXT: addi a1, a1, -480
+; RV32IXQCI-NEXT: beqz a0, .LBB51_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: addi a1, a2, 1328
+; RV32IXQCI-NEXT: .LBB51_2:
; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%ret = select i1 %cond, i32 30000, i32 20000
@@ -2370,10 +2434,12 @@ define i32 @select_cst5(i1 zeroext %cond) {
;
; RV32IXQCI-LABEL: select_cst5:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: lui a1, 1
-; RV32IXQCI-NEXT: addi a2, a1, -2047
+; RV32IXQCI-NEXT: lui a2, 1
; RV32IXQCI-NEXT: li a1, 2047
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: bnez a0, .LBB53_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: addi a1, a2, -2047
+; RV32IXQCI-NEXT: .LBB53_2:
; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%ret = select i1 %cond, i32 2047, i32 2049
@@ -2417,10 +2483,12 @@ define i32 @select_cst5_invert(i1 zeroext %cond) {
;
; RV32IXQCI-LABEL: select_cst5_invert:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: lui a1, 1
-; RV32IXQCI-NEXT: addi a2, a1, -2047
+; RV32IXQCI-NEXT: lui a2, 1
; RV32IXQCI-NEXT: li a1, 2047
-; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2
+; RV32IXQCI-NEXT: beqz a0, .LBB54_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: addi a1, a2, -2047
+; RV32IXQCI-NEXT: .LBB54_2:
; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%ret = select i1 %cond, i32 2049, i32 2047
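All of the RV32IXQCI hunks above show the same transformation as the RUN-line updates in the three files that follow: with +short-forward-branch-opt and +conditional-cmv-fusion enabled, a select whose one arm is a single computed value no longer lowers to an Xqcicm conditional move (qc.mvnei/qc.mveqi) but to a conditional branch that skips exactly one instruction. A minimal IR shape that triggers this lowering, sketched for orientation (the function name is illustrative, not taken from the diff):

define i32 @select_sfb_sketch(i1 zeroext %cond, i32 %a, i32 %b) {
entry:
  ; One arm of the select is a single computed value ...
  %c = shl i32 %a, %b
  ; ... so the backend can branch over that one instruction instead of
  ; computing both arms and emitting a conditional move.
  %sel = select i1 %cond, i32 %c, i32 %b
  ret i32 %sel
}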
diff --git a/llvm/test/CodeGen/RISCV/xqcicli.ll b/llvm/test/CodeGen/RISCV/xqcicli.ll
index 8b97616..8d4caa1 100644
--- a/llvm/test/CodeGen/RISCV/xqcicli.ll
+++ b/llvm/test/CodeGen/RISCV/xqcicli.ll
@@ -4,7 +4,7 @@
; RUN: | FileCheck %s --check-prefixes=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicli -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICLI
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) {
diff --git a/llvm/test/CodeGen/RISCV/xqcicm.ll b/llvm/test/CodeGen/RISCV/xqcicm.ll
index fb48301..8e93496 100644
--- a/llvm/test/CodeGen/RISCV/xqcicm.ll
+++ b/llvm/test/CodeGen/RISCV/xqcicm.ll
@@ -6,7 +6,7 @@
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i32 @select_example(i32 %cond, i32 %x, i32 %y) {
diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll
index 5b7ca9e7..c0839c9 100644
--- a/llvm/test/CodeGen/RISCV/xqcics.ll
+++ b/llvm/test/CodeGen/RISCV/xqcics.ll
@@ -6,7 +6,7 @@
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICS
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcics,+experimental-xqcicm -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i32 @select_cc_example_eq_s1(i32 %a, i32 %b, i32 %x, i32 %y) {
@@ -690,3 +690,127 @@ entry:
ret i32 %sel
}
+define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_eq1:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: beq a1, a0, .LBB21_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: li a2, 11
+; RV32I-NEXT: .LBB21_2: # %entry
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: ret
+;
+; RV32IXQCICS-LABEL: select_cc_example_eq1:
+; RV32IXQCICS: # %bb.0: # %entry
+; RV32IXQCICS-NEXT: qc.selectieq a0, a1, a2, 11
+; RV32IXQCICS-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_eq1:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.selectieq a0, a1, a2, 11
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_eq1:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.line a2, a1, a0, 11
+; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp eq i32 %b, %a
+ %sel = select i1 %cmp, i32 %x, i32 11
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ne1:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bne a1, a0, .LBB22_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: li a2, 11
+; RV32I-NEXT: .LBB22_2: # %entry
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: ret
+;
+; RV32IXQCICS-LABEL: select_cc_example_ne1:
+; RV32IXQCICS: # %bb.0: # %entry
+; RV32IXQCICS-NEXT: qc.selectine a0, a1, a2, 11
+; RV32IXQCICS-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ne1:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.selectine a0, a1, a2, 11
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ne1:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.lieq a2, a1, a0, 11
+; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp ne i32 %b, %a
+ %sel = select i1 %cmp, i32 %x, i32 11
+ ret i32 %sel
+}
+
+
+define i32 @select_cc_example_eq2(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_eq2:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: beq a1, a0, .LBB23_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: li a0, 11
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB23_2:
+; RV32I-NEXT: li a0, 15
+; RV32I-NEXT: ret
+;
+; RV32IXQCICS-LABEL: select_cc_example_eq2:
+; RV32IXQCICS: # %bb.0: # %entry
+; RV32IXQCICS-NEXT: qc.selectiieq a0, a1, 15, 11
+; RV32IXQCICS-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_eq2:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.selectiieq a0, a1, 15, 11
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_eq2:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.selectiieq a0, a1, 15, 11
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp eq i32 %b, %a
+ %sel = select i1 %cmp, i32 15, i32 11
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_ne2(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ne2:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bne a1, a0, .LBB24_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: li a0, 11
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB24_2:
+; RV32I-NEXT: li a0, 15
+; RV32I-NEXT: ret
+;
+; RV32IXQCICS-LABEL: select_cc_example_ne2:
+; RV32IXQCICS: # %bb.0: # %entry
+; RV32IXQCICS-NEXT: qc.selectiine a0, a1, 15, 11
+; RV32IXQCICS-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ne2:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.selectiine a0, a1, 15, 11
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ne2:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.selectiine a0, a1, 15, 11
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp ne i32 %b, %a
+ %sel = select i1 %cmp, i32 15, i32 11
+ ret i32 %sel
+}
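The four functions just added cover the remaining operand combinations for these select tests: with a register arm and an immediate arm, the Xqcics forms qc.selectieq/qc.selectine apply, and the RV32IXQCI configuration instead picks the Xqcicli conditional load-immediate forms qc.lieq/qc.line seen in the checks above; with two immediate arms, qc.selectiieq/qc.selectiine apply in every configuration. The two IR shapes being matched, as a sketch (function names are illustrative, not taken from the diff):

; register arm vs. immediate arm
define i32 @sel_reg_imm_sketch(i32 %a, i32 %b, i32 %x) {
entry:
  %cmp = icmp eq i32 %b, %a
  %sel = select i1 %cmp, i32 %x, i32 11
  ret i32 %sel
}

; immediate arm vs. immediate arm
define i32 @sel_imm_imm_sketch(i32 %a, i32 %b) {
entry:
  %cmp = icmp eq i32 %b, %a
  %sel = select i1 %cmp, i32 15, i32 11
  ret i32 %sel
}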
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
index cd52498..2964da9 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
@@ -32,6 +32,7 @@
; CHECK-DAG: OpDecorate [[g]] Binding 0
; CHECK-DAG: OpDecorate [[h]] DescriptorSet 10
; CHECK-DAG: OpDecorate [[h]] Binding 3
+; CHECK-NOT: OpDecorate [[h]] Binding 4
; CHECK-DAG: OpDecorate [[i]] DescriptorSet 10
; CHECK-DAG: OpDecorate [[i]] Binding 2
@@ -44,30 +45,34 @@ entry:
%3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 2, i32 1, i32 0, ptr nonnull @.str.6)
%4 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 1, i32 1, i32 0, ptr nonnull @.str.8)
%5 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 2, i32 10, i32 1, i32 0, ptr nonnull @.str.10)
- %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 1, i32 0, ptr nonnull @.str.12)
- %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, ptr nonnull @.str.14)
- %8 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0)
- %9 = load i32, ptr addrspace(11) %8, align 4
- %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0)
- %11 = load i32, ptr addrspace(11) %10, align 4
- %add.i = add nsw i32 %11, %9
- %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0)
- %13 = load i32, ptr addrspace(11) %12, align 4
- %add4.i = add nsw i32 %add.i, %13
- %14 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0)
- %15 = load i32, ptr addrspace(11) %14, align 4
- %add6.i = add nsw i32 %add4.i, %15
- %16 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0)
- %17 = load i32, ptr addrspace(11) %16, align 4
- %add8.i = add nsw i32 %add6.i, %17
- %18 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0)
- %19 = load i32, ptr addrspace(11) %18, align 4
- %add10.i = add nsw i32 %add8.i, %19
- %20 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0)
- %21 = load i32, ptr addrspace(11) %20, align 4
- %add12.i = add nsw i32 %add10.i, %21
- %22 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0)
- store i32 %add12.i, ptr addrspace(11) %22, align 4
+ %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 2, i32 0, ptr nonnull @.str.12)
+ %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 2, i32 1, ptr nonnull @.str.12)
+ %8 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, ptr nonnull @.str.14)
+ %9 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0)
+ %10 = load i32, ptr addrspace(11) %9, align 4
+ %11 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0)
+ %12 = load i32, ptr addrspace(11) %11, align 4
+ %add.i = add nsw i32 %12, %10
+ %13 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0)
+ %14 = load i32, ptr addrspace(11) %13, align 4
+ %add4.i = add nsw i32 %add.i, %14
+ %15 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0)
+ %16 = load i32, ptr addrspace(11) %15, align 4
+ %add6.i = add nsw i32 %add4.i, %16
+ %17 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0)
+ %18 = load i32, ptr addrspace(11) %17, align 4
+ %add8.i = add nsw i32 %add6.i, %18
+ %19 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0)
+ %20 = load i32, ptr addrspace(11) %19, align 4
+ %add10.i = add nsw i32 %add8.i, %20
+ %21 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0)
+ %22 = load i32, ptr addrspace(11) %21, align 4
+ %add12.i = add nsw i32 %add10.i, %22
+ %23 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %8, i32 0)
+ %24 = load i32, ptr addrspace(11) %23, align 4
+ %add14.i = add nsw i32 %add12.i, %24
+ %25 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0)
+ store i32 %add14.i, ptr addrspace(11) %25, align 4
ret void
}
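What this hunk changes: the handle that used to be created once from implicit-binding order ID 3 is now created twice from a two-element binding range (third operand raised from 1 to 2) at indices 0 and 1 (fourth operand), so every later SSA number shifts by one and the accumulated sum gains the %add14.i term; the CHECK-NOT added above verifies that [[h]] is still decorated only with Binding 3 and does not also receive Binding 4. Assuming the operand order these tests use consistently, the intrinsic's arguments read as:

; @llvm.spv.resource.handlefromimplicitbinding(
;     i32 %orderId,    ; implicit-binding order ID (3 here)
;     i32 %descSet,    ; descriptor set (10 here)
;     i32 %rangeSize,  ; bindings in the range (now 2)
;     i32 %index,      ; index within the range (0 and 1)
;     ptr %name)       ; resource name string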
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll
new file mode 100644
index 0000000..c968c99
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll
@@ -0,0 +1,19 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; CHECK-ERROR: LLVM ERROR: Implicit binding calls with the same order ID must have the same descriptor set
+
+@.str = private unnamed_addr constant [2 x i8] c"b\00", align 1
+@.str.2 = private unnamed_addr constant [2 x i8] c"c\00", align 1
+
+define void @main() local_unnamed_addr #0 {
+entry:
+ %0 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str)
+ %1 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0)
+ %2 = load i32, ptr addrspace(11) %1, align 4
+ %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 1, i32 1, i32 0, ptr nonnull @.str.2)
+ %4 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0)
+ store i32 %2, ptr addrspace(11) %4, align 4
+ ret void
+}
+
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
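The new test pins down the error path: both handlefromimplicitbinding calls use order ID 0 (first operand) but name descriptor sets 0 and 1 (second operand), which is exactly the inconsistency the CHECK-ERROR line expects the backend to reject.

;                       orderId  descSet
;   handle for @.str  :  i32 0,   i32 0
;   handle for @.str.2:  i32 0,   i32 1   <- same order ID, different set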
diff --git a/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll b/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll
index d3d6413..eb7c1b6 100644
--- a/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll
@@ -235,7 +235,7 @@ define half @f12_half(half %dummy, half %val, ptr %dest) {
; CHECK-NEXT: blah %f0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: brasl %r14, __extendhfsf2@PLT
-; CHECK-NEXT: ltebr %f0, %f0
+; CHECK-NEXT: ltebr %f1, %f0
; CHECK-NEXT: jl .LBB11_2
; CHECK-NEXT:# %bb.1:
; CHECK-NEXT: lgdr %r0, %f8
@@ -344,7 +344,7 @@ define half @f15_half(half %val, half %dummy, ptr %dest) {
; CHECK-NEXT: blah %f2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: brasl %r14, __extendhfsf2@PLT
-; CHECK-NEXT: ltebr %f0, %f0
+; CHECK-NEXT: ltebr %f1, %f0
; CHECK-NEXT: jl .LBB15_2
; CHECK-NEXT:# %bb.1:
; CHECK-NEXT: lgdr %r0, %f8
diff --git a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
index 41d2c02..5a79659 100644
--- a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
@@ -348,38 +348,35 @@ entry:
define <4 x float> @vector_add_f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK-MVE-LABEL: vector_add_f32:
; CHECK-MVE: @ %bb.0: @ %entry
-; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, lr}
-; CHECK-MVE-NEXT: push {r4, r5, r6, r7, lr}
-; CHECK-MVE-NEXT: .pad #4
-; CHECK-MVE-NEXT: sub sp, #4
+; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; CHECK-MVE-NEXT: push.w {r4, r5, r6, r7, r8, lr}
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
-; CHECK-MVE-NEXT: mov r4, r0
+; CHECK-MVE-NEXT: mov r8, r0
; CHECK-MVE-NEXT: add r0, sp, #40
; CHECK-MVE-NEXT: vldrw.u32 q4, [r0]
-; CHECK-MVE-NEXT: mov r6, r1
+; CHECK-MVE-NEXT: mov r7, r1
; CHECK-MVE-NEXT: mov r0, r3
-; CHECK-MVE-NEXT: mov r5, r2
-; CHECK-MVE-NEXT: vmov r7, r1, d9
+; CHECK-MVE-NEXT: mov r6, r2
+; CHECK-MVE-NEXT: vmov r4, r1, d9
; CHECK-MVE-NEXT: bl __aeabi_fadd
-; CHECK-MVE-NEXT: vmov s19, r0
-; CHECK-MVE-NEXT: mov r0, r5
-; CHECK-MVE-NEXT: mov r1, r7
-; CHECK-MVE-NEXT: bl __aeabi_fadd
-; CHECK-MVE-NEXT: vmov r5, r1, d8
-; CHECK-MVE-NEXT: vmov s18, r0
+; CHECK-MVE-NEXT: mov r5, r0
; CHECK-MVE-NEXT: mov r0, r6
+; CHECK-MVE-NEXT: mov r1, r4
; CHECK-MVE-NEXT: bl __aeabi_fadd
-; CHECK-MVE-NEXT: vmov s17, r0
-; CHECK-MVE-NEXT: mov r0, r4
-; CHECK-MVE-NEXT: mov r1, r5
+; CHECK-MVE-NEXT: vmov r6, r1, d8
+; CHECK-MVE-NEXT: mov r4, r0
+; CHECK-MVE-NEXT: mov r0, r7
; CHECK-MVE-NEXT: bl __aeabi_fadd
-; CHECK-MVE-NEXT: vmov s16, r0
-; CHECK-MVE-NEXT: vmov r2, r3, d9
-; CHECK-MVE-NEXT: vmov r0, r1, d8
+; CHECK-MVE-NEXT: mov r7, r0
+; CHECK-MVE-NEXT: mov r0, r8
+; CHECK-MVE-NEXT: mov r1, r6
+; CHECK-MVE-NEXT: bl __aeabi_fadd
+; CHECK-MVE-NEXT: mov r1, r7
+; CHECK-MVE-NEXT: mov r2, r4
+; CHECK-MVE-NEXT: mov r3, r5
; CHECK-MVE-NEXT: vpop {d8, d9}
-; CHECK-MVE-NEXT: add sp, #4
-; CHECK-MVE-NEXT: pop {r4, r5, r6, r7, pc}
+; CHECK-MVE-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
;
; CHECK-BE-LABEL: vector_add_f32:
; CHECK-BE: @ %bb.0: @ %entry
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 4dd9173..93b5e3f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -33,53 +33,29 @@ entry:
}
define void @vld3_v4i32(ptr %src, ptr %dst) {
-; CHECK-LV-LABEL: vld3_v4i32:
-; CHECK-LV: @ %bb.0: @ %entry
-; CHECK-LV-NEXT: .vsave {d8, d9}
-; CHECK-LV-NEXT: vpush {d8, d9}
-; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #16]
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0]
-; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #32]
-; CHECK-LV-NEXT: vmov.f32 s10, s2
-; CHECK-LV-NEXT: vmov.f32 s13, s0
-; CHECK-LV-NEXT: vmov.f32 s14, s3
-; CHECK-LV-NEXT: vmov.f32 s8, s4
-; CHECK-LV-NEXT: vmov.f32 s9, s7
-; CHECK-LV-NEXT: vmov.f32 s12, s5
-; CHECK-LV-NEXT: vmov.f32 s15, s18
-; CHECK-LV-NEXT: vmov.f32 s11, s17
-; CHECK-LV-NEXT: vadd.i32 q2, q2, q3
-; CHECK-LV-NEXT: vmov.f32 s0, s6
-; CHECK-LV-NEXT: vmov.f32 s2, s16
-; CHECK-LV-NEXT: vmov.f32 s3, s19
-; CHECK-LV-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LV-NEXT: vstrw.32 q0, [r1]
-; CHECK-LV-NEXT: vpop {d8, d9}
-; CHECK-LV-NEXT: bx lr
-;
-; CHECK-LIS-LABEL: vld3_v4i32:
-; CHECK-LIS: @ %bb.0: @ %entry
-; CHECK-LIS-NEXT: .vsave {d8, d9}
-; CHECK-LIS-NEXT: vpush {d8, d9}
-; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #16]
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0]
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LIS-NEXT: vmov.f32 s10, s2
-; CHECK-LIS-NEXT: vmov.f32 s17, s0
-; CHECK-LIS-NEXT: vmov.f32 s18, s3
-; CHECK-LIS-NEXT: vmov.f32 s8, s4
-; CHECK-LIS-NEXT: vmov.f32 s9, s7
-; CHECK-LIS-NEXT: vmov.f32 s16, s5
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s11, s13
-; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4
-; CHECK-LIS-NEXT: vmov.f32 s0, s6
-; CHECK-LIS-NEXT: vmov.f32 s2, s12
-; CHECK-LIS-NEXT: vmov.f32 s3, s15
-; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LIS-NEXT: vstrw.32 q0, [r1]
-; CHECK-LIS-NEXT: vpop {d8, d9}
-; CHECK-LIS-NEXT: bx lr
+; CHECK-LABEL: vld3_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
+; CHECK-NEXT: vldrw.u32 q1, [r0]
+; CHECK-NEXT: vldrw.u32 q4, [r0, #32]
+; CHECK-NEXT: vmov.f32 s10, s2
+; CHECK-NEXT: vmov.f32 s13, s0
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vmov.f32 s8, s4
+; CHECK-NEXT: vmov.f32 s9, s7
+; CHECK-NEXT: vmov.f32 s12, s5
+; CHECK-NEXT: vmov.f32 s15, s18
+; CHECK-NEXT: vmov.f32 s11, s17
+; CHECK-NEXT: vadd.i32 q2, q2, q3
+; CHECK-NEXT: vmov.f32 s0, s6
+; CHECK-NEXT: vmov.f32 s2, s16
+; CHECK-NEXT: vmov.f32 s3, s19
+; CHECK-NEXT: vadd.i32 q0, q2, q0
+; CHECK-NEXT: vstrw.32 q0, [r1]
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
entry:
%l1 = load <12 x i32>, ptr %src, align 4
@@ -93,87 +69,46 @@ entry:
}
define void @vld3_v8i32(ptr %src, ptr %dst) {
-; CHECK-LV-LABEL: vld3_v8i32:
-; CHECK-LV: @ %bb.0: @ %entry
-; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11}
-; CHECK-LV-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64]
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48]
-; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80]
-; CHECK-LV-NEXT: vmov.f32 s10, s2
-; CHECK-LV-NEXT: vmov.f32 s13, s0
-; CHECK-LV-NEXT: vmov.f32 s14, s3
-; CHECK-LV-NEXT: vmov.f32 s8, s4
-; CHECK-LV-NEXT: vmov.f32 s9, s7
-; CHECK-LV-NEXT: vmov.f32 s12, s5
-; CHECK-LV-NEXT: vmov.f32 s15, s18
-; CHECK-LV-NEXT: vmov.f32 s11, s17
-; CHECK-LV-NEXT: vadd.i32 q2, q2, q3
-; CHECK-LV-NEXT: vmov.f32 s0, s6
-; CHECK-LV-NEXT: vmov.f32 s2, s16
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16]
-; CHECK-LV-NEXT: vmov.f32 s3, s19
-; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LV-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LV-NEXT: vldrw.u32 q2, [r0]
-; CHECK-LV-NEXT: vmov.f32 s17, s4
-; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-LV-NEXT: vmov.f32 s18, s7
-; CHECK-LV-NEXT: vmov.f32 s22, s6
-; CHECK-LV-NEXT: vmov.f32 s16, s9
-; CHECK-LV-NEXT: vmov.f32 s19, s14
-; CHECK-LV-NEXT: vmov.f32 s20, s8
-; CHECK-LV-NEXT: vmov.f32 s21, s11
-; CHECK-LV-NEXT: vmov.f32 s23, s13
-; CHECK-LV-NEXT: vadd.i32 q4, q5, q4
-; CHECK-LV-NEXT: vmov.f32 s4, s10
-; CHECK-LV-NEXT: vmov.f32 s6, s12
-; CHECK-LV-NEXT: vmov.f32 s7, s15
-; CHECK-LV-NEXT: vadd.i32 q1, q4, q1
-; CHECK-LV-NEXT: vstrw.32 q1, [r1]
-; CHECK-LV-NEXT: vpop {d8, d9, d10, d11}
-; CHECK-LV-NEXT: bx lr
-;
-; CHECK-LIS-LABEL: vld3_v8i32:
-; CHECK-LIS: @ %bb.0: @ %entry
-; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11}
-; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64]
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48]
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80]
-; CHECK-LIS-NEXT: vmov.f32 s10, s2
-; CHECK-LIS-NEXT: vmov.f32 s17, s0
-; CHECK-LIS-NEXT: vmov.f32 s18, s3
-; CHECK-LIS-NEXT: vmov.f32 s8, s4
-; CHECK-LIS-NEXT: vmov.f32 s9, s7
-; CHECK-LIS-NEXT: vmov.f32 s16, s5
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s11, s13
-; CHECK-LIS-NEXT: vmov.f32 s0, s6
-; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4
-; CHECK-LIS-NEXT: vmov.f32 s2, s12
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16]
-; CHECK-LIS-NEXT: vmov.f32 s3, s15
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LIS-NEXT: vldrw.u32 q2, [r0]
-; CHECK-LIS-NEXT: vmov.f32 s17, s4
-; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-LIS-NEXT: vmov.f32 s18, s7
-; CHECK-LIS-NEXT: vmov.f32 s22, s6
-; CHECK-LIS-NEXT: vmov.f32 s16, s9
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s20, s8
-; CHECK-LIS-NEXT: vmov.f32 s21, s11
-; CHECK-LIS-NEXT: vmov.f32 s23, s13
-; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4
-; CHECK-LIS-NEXT: vmov.f32 s4, s10
-; CHECK-LIS-NEXT: vmov.f32 s6, s12
-; CHECK-LIS-NEXT: vmov.f32 s7, s15
-; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1
-; CHECK-LIS-NEXT: vstrw.32 q1, [r1]
-; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11}
-; CHECK-LIS-NEXT: bx lr
+; CHECK-LABEL: vld3_v8i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vldrw.u32 q0, [r0, #64]
+; CHECK-NEXT: vldrw.u32 q1, [r0, #48]
+; CHECK-NEXT: vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT: vmov.f32 s10, s2
+; CHECK-NEXT: vmov.f32 s13, s0
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vmov.f32 s8, s4
+; CHECK-NEXT: vmov.f32 s9, s7
+; CHECK-NEXT: vmov.f32 s12, s5
+; CHECK-NEXT: vmov.f32 s15, s18
+; CHECK-NEXT: vmov.f32 s11, s17
+; CHECK-NEXT: vadd.i32 q2, q2, q3
+; CHECK-NEXT: vmov.f32 s0, s6
+; CHECK-NEXT: vmov.f32 s2, s16
+; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT: vmov.f32 s3, s19
+; CHECK-NEXT: vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT: vadd.i32 q0, q2, q0
+; CHECK-NEXT: vldrw.u32 q2, [r0]
+; CHECK-NEXT: vmov.f32 s17, s4
+; CHECK-NEXT: vstrw.32 q0, [r1, #16]
+; CHECK-NEXT: vmov.f32 s18, s7
+; CHECK-NEXT: vmov.f32 s22, s6
+; CHECK-NEXT: vmov.f32 s16, s9
+; CHECK-NEXT: vmov.f32 s19, s14
+; CHECK-NEXT: vmov.f32 s20, s8
+; CHECK-NEXT: vmov.f32 s21, s11
+; CHECK-NEXT: vmov.f32 s23, s13
+; CHECK-NEXT: vadd.i32 q4, q5, q4
+; CHECK-NEXT: vmov.f32 s4, s10
+; CHECK-NEXT: vmov.f32 s6, s12
+; CHECK-NEXT: vmov.f32 s7, s15
+; CHECK-NEXT: vadd.i32 q1, q4, q1
+; CHECK-NEXT: vstrw.32 q1, [r1]
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: bx lr
entry:
%l1 = load <24 x i32>, ptr %src, align 4
@@ -187,155 +122,80 @@ entry:
}
define void @vld3_v16i32(ptr %src, ptr %dst) {
-; CHECK-LV-LABEL: vld3_v16i32:
-; CHECK-LV: @ %bb.0: @ %entry
-; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LV-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64]
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48]
-; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80]
-; CHECK-LV-NEXT: vldrw.u32 q6, [r0, #176]
-; CHECK-LV-NEXT: vmov.f32 s10, s2
-; CHECK-LV-NEXT: vmov.f32 s13, s0
-; CHECK-LV-NEXT: vmov.f32 s14, s3
-; CHECK-LV-NEXT: vmov.f32 s8, s4
-; CHECK-LV-NEXT: vmov.f32 s9, s7
-; CHECK-LV-NEXT: vmov.f32 s12, s5
-; CHECK-LV-NEXT: vmov.f32 s15, s18
-; CHECK-LV-NEXT: vmov.f32 s11, s17
-; CHECK-LV-NEXT: vadd.i32 q2, q2, q3
-; CHECK-LV-NEXT: vmov.f32 s0, s6
-; CHECK-LV-NEXT: vmov.f32 s2, s16
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16]
-; CHECK-LV-NEXT: vmov.f32 s3, s19
-; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LV-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LV-NEXT: vldrw.u32 q2, [r0]
-; CHECK-LV-NEXT: vmov.f32 s17, s4
-; CHECK-LV-NEXT: vmov.f32 s18, s7
-; CHECK-LV-NEXT: vmov.f32 s22, s6
-; CHECK-LV-NEXT: vmov.f32 s16, s9
-; CHECK-LV-NEXT: vmov.f32 s19, s14
-; CHECK-LV-NEXT: vmov.f32 s20, s8
-; CHECK-LV-NEXT: vmov.f32 s21, s11
-; CHECK-LV-NEXT: vmov.f32 s23, s13
-; CHECK-LV-NEXT: vmov.f32 s4, s10
-; CHECK-LV-NEXT: vldrw.u32 q2, [r0, #160]
-; CHECK-LV-NEXT: vmov.f32 s6, s12
-; CHECK-LV-NEXT: vadd.i32 q4, q5, q4
-; CHECK-LV-NEXT: vmov.f32 s7, s15
-; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #144]
-; CHECK-LV-NEXT: vadd.i32 q1, q4, q1
-; CHECK-LV-NEXT: vmov.f32 s18, s10
-; CHECK-LV-NEXT: vmov.f32 s21, s8
-; CHECK-LV-NEXT: vmov.f32 s22, s11
-; CHECK-LV-NEXT: vmov.f32 s16, s12
-; CHECK-LV-NEXT: vmov.f32 s17, s15
-; CHECK-LV-NEXT: vmov.f32 s20, s13
-; CHECK-LV-NEXT: vmov.f32 s23, s26
-; CHECK-LV-NEXT: vmov.f32 s19, s25
-; CHECK-LV-NEXT: vadd.i32 q4, q4, q5
-; CHECK-LV-NEXT: vmov.f32 s8, s14
-; CHECK-LV-NEXT: vmov.f32 s10, s24
-; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #112]
-; CHECK-LV-NEXT: vmov.f32 s11, s27
-; CHECK-LV-NEXT: vldrw.u32 q5, [r0, #128]
-; CHECK-LV-NEXT: vadd.i32 q2, q4, q2
-; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #96]
-; CHECK-LV-NEXT: vmov.f32 s25, s12
-; CHECK-LV-NEXT: vstrw.32 q2, [r1, #48]
-; CHECK-LV-NEXT: vmov.f32 s26, s15
-; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-LV-NEXT: vmov.f32 s30, s14
-; CHECK-LV-NEXT: vstrw.32 q1, [r1]
-; CHECK-LV-NEXT: vmov.f32 s24, s17
-; CHECK-LV-NEXT: vmov.f32 s27, s22
-; CHECK-LV-NEXT: vmov.f32 s28, s16
-; CHECK-LV-NEXT: vmov.f32 s29, s19
-; CHECK-LV-NEXT: vmov.f32 s31, s21
-; CHECK-LV-NEXT: vadd.i32 q6, q7, q6
-; CHECK-LV-NEXT: vmov.f32 s12, s18
-; CHECK-LV-NEXT: vmov.f32 s14, s20
-; CHECK-LV-NEXT: vmov.f32 s15, s23
-; CHECK-LV-NEXT: vadd.i32 q3, q6, q3
-; CHECK-LV-NEXT: vstrw.32 q3, [r1, #32]
-; CHECK-LV-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LV-NEXT: bx lr
-;
-; CHECK-LIS-LABEL: vld3_v16i32:
-; CHECK-LIS: @ %bb.0: @ %entry
-; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64]
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48]
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80]
-; CHECK-LIS-NEXT: vmov.f32 s10, s2
-; CHECK-LIS-NEXT: vmov.f32 s17, s0
-; CHECK-LIS-NEXT: vmov.f32 s18, s3
-; CHECK-LIS-NEXT: vmov.f32 s8, s4
-; CHECK-LIS-NEXT: vmov.f32 s9, s7
-; CHECK-LIS-NEXT: vmov.f32 s16, s5
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s11, s13
-; CHECK-LIS-NEXT: vmov.f32 s0, s6
-; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4
-; CHECK-LIS-NEXT: vmov.f32 s2, s12
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16]
-; CHECK-LIS-NEXT: vmov.f32 s3, s15
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LIS-NEXT: vldrw.u32 q2, [r0]
-; CHECK-LIS-NEXT: vmov.f32 s17, s4
-; CHECK-LIS-NEXT: vmov.f32 s18, s7
-; CHECK-LIS-NEXT: vmov.f32 s22, s6
-; CHECK-LIS-NEXT: vmov.f32 s16, s9
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s20, s8
-; CHECK-LIS-NEXT: vmov.f32 s21, s11
-; CHECK-LIS-NEXT: vmov.f32 s23, s13
-; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4
-; CHECK-LIS-NEXT: vmov.f32 s4, s10
-; CHECK-LIS-NEXT: vldrw.u32 q2, [r0, #160]
-; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #176]
-; CHECK-LIS-NEXT: vmov.f32 s6, s12
-; CHECK-LIS-NEXT: vmov.f32 s7, s15
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #144]
-; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1
-; CHECK-LIS-NEXT: vmov.f32 s18, s10
-; CHECK-LIS-NEXT: vmov.f32 s25, s8
-; CHECK-LIS-NEXT: vmov.f32 s26, s11
-; CHECK-LIS-NEXT: vmov.f32 s16, s12
-; CHECK-LIS-NEXT: vmov.f32 s17, s15
-; CHECK-LIS-NEXT: vmov.f32 s24, s13
-; CHECK-LIS-NEXT: vmov.f32 s27, s22
-; CHECK-LIS-NEXT: vmov.f32 s19, s21
-; CHECK-LIS-NEXT: vmov.f32 s8, s14
-; CHECK-LIS-NEXT: vadd.i32 q4, q4, q6
-; CHECK-LIS-NEXT: vmov.f32 s10, s20
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #112]
-; CHECK-LIS-NEXT: vmov.f32 s11, s23
-; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #128]
-; CHECK-LIS-NEXT: vadd.i32 q2, q4, q2
-; CHECK-LIS-NEXT: vldrw.u32 q4, [r0, #96]
-; CHECK-LIS-NEXT: vmov.f32 s25, s12
-; CHECK-LIS-NEXT: vstrw.32 q2, [r1, #48]
-; CHECK-LIS-NEXT: vmov.f32 s26, s15
-; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-LIS-NEXT: vmov.f32 s30, s14
-; CHECK-LIS-NEXT: vstrw.32 q1, [r1]
-; CHECK-LIS-NEXT: vmov.f32 s24, s17
-; CHECK-LIS-NEXT: vmov.f32 s27, s22
-; CHECK-LIS-NEXT: vmov.f32 s28, s16
-; CHECK-LIS-NEXT: vmov.f32 s29, s19
-; CHECK-LIS-NEXT: vmov.f32 s31, s21
-; CHECK-LIS-NEXT: vadd.i32 q6, q7, q6
-; CHECK-LIS-NEXT: vmov.f32 s12, s18
-; CHECK-LIS-NEXT: vmov.f32 s14, s20
-; CHECK-LIS-NEXT: vmov.f32 s15, s23
-; CHECK-LIS-NEXT: vadd.i32 q3, q6, q3
-; CHECK-LIS-NEXT: vstrw.32 q3, [r1, #32]
-; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LIS-NEXT: bx lr
+; CHECK-LABEL: vld3_v16i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vldrw.u32 q0, [r0, #64]
+; CHECK-NEXT: vldrw.u32 q1, [r0, #48]
+; CHECK-NEXT: vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT: vldrw.u32 q6, [r0, #176]
+; CHECK-NEXT: vmov.f32 s10, s2
+; CHECK-NEXT: vmov.f32 s13, s0
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vmov.f32 s8, s4
+; CHECK-NEXT: vmov.f32 s9, s7
+; CHECK-NEXT: vmov.f32 s12, s5
+; CHECK-NEXT: vmov.f32 s15, s18
+; CHECK-NEXT: vmov.f32 s11, s17
+; CHECK-NEXT: vadd.i32 q2, q2, q3
+; CHECK-NEXT: vmov.f32 s0, s6
+; CHECK-NEXT: vmov.f32 s2, s16
+; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT: vmov.f32 s3, s19
+; CHECK-NEXT: vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT: vadd.i32 q0, q2, q0
+; CHECK-NEXT: vldrw.u32 q2, [r0]
+; CHECK-NEXT: vmov.f32 s17, s4
+; CHECK-NEXT: vmov.f32 s18, s7
+; CHECK-NEXT: vmov.f32 s22, s6
+; CHECK-NEXT: vmov.f32 s16, s9
+; CHECK-NEXT: vmov.f32 s19, s14
+; CHECK-NEXT: vmov.f32 s20, s8
+; CHECK-NEXT: vmov.f32 s21, s11
+; CHECK-NEXT: vmov.f32 s23, s13
+; CHECK-NEXT: vmov.f32 s4, s10
+; CHECK-NEXT: vldrw.u32 q2, [r0, #160]
+; CHECK-NEXT: vmov.f32 s6, s12
+; CHECK-NEXT: vadd.i32 q4, q5, q4
+; CHECK-NEXT: vmov.f32 s7, s15
+; CHECK-NEXT: vldrw.u32 q3, [r0, #144]
+; CHECK-NEXT: vadd.i32 q1, q4, q1
+; CHECK-NEXT: vmov.f32 s18, s10
+; CHECK-NEXT: vmov.f32 s21, s8
+; CHECK-NEXT: vmov.f32 s22, s11
+; CHECK-NEXT: vmov.f32 s16, s12
+; CHECK-NEXT: vmov.f32 s17, s15
+; CHECK-NEXT: vmov.f32 s20, s13
+; CHECK-NEXT: vmov.f32 s23, s26
+; CHECK-NEXT: vmov.f32 s19, s25
+; CHECK-NEXT: vadd.i32 q4, q4, q5
+; CHECK-NEXT: vmov.f32 s8, s14
+; CHECK-NEXT: vmov.f32 s10, s24
+; CHECK-NEXT: vldrw.u32 q3, [r0, #112]
+; CHECK-NEXT: vmov.f32 s11, s27
+; CHECK-NEXT: vldrw.u32 q5, [r0, #128]
+; CHECK-NEXT: vadd.i32 q2, q4, q2
+; CHECK-NEXT: vldrw.u32 q4, [r0, #96]
+; CHECK-NEXT: vmov.f32 s25, s12
+; CHECK-NEXT: vstrw.32 q2, [r1, #48]
+; CHECK-NEXT: vmov.f32 s26, s15
+; CHECK-NEXT: vstrw.32 q0, [r1, #16]
+; CHECK-NEXT: vmov.f32 s30, s14
+; CHECK-NEXT: vstrw.32 q1, [r1]
+; CHECK-NEXT: vmov.f32 s24, s17
+; CHECK-NEXT: vmov.f32 s27, s22
+; CHECK-NEXT: vmov.f32 s28, s16
+; CHECK-NEXT: vmov.f32 s29, s19
+; CHECK-NEXT: vmov.f32 s31, s21
+; CHECK-NEXT: vadd.i32 q6, q7, q6
+; CHECK-NEXT: vmov.f32 s12, s18
+; CHECK-NEXT: vmov.f32 s14, s20
+; CHECK-NEXT: vmov.f32 s15, s23
+; CHECK-NEXT: vadd.i32 q3, q6, q3
+; CHECK-NEXT: vstrw.32 q3, [r1, #32]
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
entry:
%l1 = load <48 x i32>, ptr %src, align 4
diff --git a/llvm/test/CodeGen/VE/Vector/vec_divrem.ll b/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
index 3bc0aba..93e2889 100644
--- a/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
+++ b/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
@@ -7,19 +7,22 @@
define <4 x i8> @udiv_by_minus_one(<4 x i8> %x) {
; CHECK-LABEL: udiv_by_minus_one:
; CHECK: # %bb.0:
-; CHECK-NEXT: and %s0, %s0, (56)0
-; CHECK-NEXT: lea %s4, 16843010
-; CHECK-NEXT: muls.l %s0, %s0, %s4
-; CHECK-NEXT: srl %s0, %s0, 32
+; CHECK-NEXT: and %s4, %s0, (56)0
; CHECK-NEXT: and %s1, %s1, (56)0
-; CHECK-NEXT: muls.l %s1, %s1, %s4
-; CHECK-NEXT: srl %s1, %s1, 32
; CHECK-NEXT: and %s2, %s2, (56)0
-; CHECK-NEXT: muls.l %s2, %s2, %s4
-; CHECK-NEXT: srl %s2, %s2, 32
; CHECK-NEXT: and %s3, %s3, (56)0
-; CHECK-NEXT: muls.l %s3, %s3, %s4
-; CHECK-NEXT: srl %s3, %s3, 32
+; CHECK-NEXT: or %s0, 0, (0)1
+; CHECK-NEXT: cmpu.w %s5, %s3, (56)0
+; CHECK-NEXT: or %s3, 0, (0)1
+; CHECK-NEXT: cmov.w.eq %s3, (63)0, %s5
+; CHECK-NEXT: cmpu.w %s5, %s2, (56)0
+; CHECK-NEXT: or %s2, 0, (0)1
+; CHECK-NEXT: cmov.w.eq %s2, (63)0, %s5
+; CHECK-NEXT: cmpu.w %s5, %s1, (56)0
+; CHECK-NEXT: or %s1, 0, (0)1
+; CHECK-NEXT: cmov.w.eq %s1, (63)0, %s5
+; CHECK-NEXT: cmpu.w %s4, %s4, (56)0
+; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s4
; CHECK-NEXT: b.l.t (, %s10)
%r = udiv <4 x i8> %x, <i8 255, i8 255, i8 255, i8 255>
ret <4 x i8> %r
@@ -28,27 +31,18 @@ define <4 x i8> @udiv_by_minus_one(<4 x i8> %x) {
define <4 x i8> @urem_by_minus_one(<4 x i8> %x) {
; CHECK-LABEL: urem_by_minus_one:
; CHECK: # %bb.0:
-; CHECK-NEXT: and %s0, %s0, (56)0
-; CHECK-NEXT: and %s1, %s1, (56)0
-; CHECK-NEXT: and %s2, %s2, (56)0
-; CHECK-NEXT: and %s3, %s3, (56)0
-; CHECK-NEXT: lea %s4, 16843010
-; CHECK-NEXT: muls.l %s5, %s3, %s4
-; CHECK-NEXT: srl %s5, %s5, 32
-; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0
-; CHECK-NEXT: subs.w.sx %s3, %s3, %s5
-; CHECK-NEXT: muls.l %s5, %s2, %s4
-; CHECK-NEXT: srl %s5, %s5, 32
-; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0
-; CHECK-NEXT: subs.w.sx %s2, %s2, %s5
-; CHECK-NEXT: muls.l %s5, %s1, %s4
-; CHECK-NEXT: srl %s5, %s5, 32
-; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0
-; CHECK-NEXT: subs.w.sx %s1, %s1, %s5
-; CHECK-NEXT: muls.l %s4, %s0, %s4
-; CHECK-NEXT: srl %s4, %s4, 32
-; CHECK-NEXT: muls.w.sx %s4, %s4, (56)0
-; CHECK-NEXT: subs.w.sx %s0, %s0, %s4
+; CHECK-NEXT: and %s4, %s0, (56)0
+; CHECK-NEXT: and %s5, %s1, (56)0
+; CHECK-NEXT: and %s6, %s2, (56)0
+; CHECK-NEXT: and %s7, %s3, (56)0
+; CHECK-NEXT: cmpu.w %s7, %s7, (56)0
+; CHECK-NEXT: cmov.w.eq %s3, (0)1, %s7
+; CHECK-NEXT: cmpu.w %s6, %s6, (56)0
+; CHECK-NEXT: cmov.w.eq %s2, (0)1, %s6
+; CHECK-NEXT: cmpu.w %s5, %s5, (56)0
+; CHECK-NEXT: cmov.w.eq %s1, (0)1, %s5
+; CHECK-NEXT: cmpu.w %s4, %s4, (56)0
+; CHECK-NEXT: cmov.w.eq %s0, (0)1, %s4
; CHECK-NEXT: b.l.t (, %s10)
%r = urem <4 x i8> %x, <i8 255, i8 255, i8 255, i8 255>
ret <4 x i8> %r
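Both VE hunks follow from one scalar fact: 255 is the largest unsigned 8-bit value, so for an i8 lane x, udiv x, 255 is 1 exactly when x == 255 (and 0 otherwise), while urem x, 255 is 0 exactly when x == 255 (and x otherwise). That is why the magic-constant multiply/shift sequences collapse into one unsigned compare against (56)0 (the low-byte mask, i.e. 255) plus one conditional move per lane. The scalar identities as an IR sketch (function names are illustrative):

; udiv by the all-ones byte reduces to an equality test.
define i8 @udiv_by_255_sketch(i8 %x) {
  %eq = icmp eq i8 %x, -1       ; x == 255?
  %q  = zext i1 %eq to i8       ; quotient is 1 or 0
  ret i8 %q
}

; urem by the all-ones byte keeps x unless x == 255.
define i8 @urem_by_255_sketch(i8 %x) {
  %eq = icmp eq i8 %x, -1
  %r  = select i1 %eq, i8 0, i8 %x
  ret i8 %r
}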
diff --git a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
index 6ef7219..9cf7aab 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
@@ -56,14 +56,9 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind {
; CHECK-LABEL: PR90954:
; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbp
-; CHECK-NEXT: movq %rsp, %rbp
-; CHECK-NEXT: pushq %r15
; CHECK-NEXT: pushq %r14
-; CHECK-NEXT: pushq %r13
-; CHECK-NEXT: pushq %r12
; CHECK-NEXT: pushq %rbx
-; CHECK-NEXT: andq $-1024, %rsp # imm = 0xFC00
-; CHECK-NEXT: subq $5120, %rsp # imm = 0x1400
+; CHECK-NEXT: subq $2912, %rsp # imm = 0xB60
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb $1, {{[0-9]+}}(%rsp)
@@ -79,29 +74,26 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind {
; CHECK-NEXT: movw $64, %cx
; CHECK-NEXT: movw $16, %di
; CHECK-NEXT: movb $1, %r8b
-; CHECK-NEXT: movl $64, %r9d
-; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r10
-; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r11
-; CHECK-NEXT: xorl %ebx, %ebx
-; CHECK-NEXT: xorl %r14d, %r14d
+; CHECK-NEXT: xorl %r9d, %r9d
+; CHECK-NEXT: xorl %r10d, %r10d
; CHECK-NEXT: jmp .LBB1_1
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB1_5: # in Loop: Header=BB1_1 Depth=1
-; CHECK-NEXT: incq %r14
-; CHECK-NEXT: addl %edx, %ebx
+; CHECK-NEXT: incq %r10
+; CHECK-NEXT: addl %edx, %r9d
; CHECK-NEXT: .LBB1_1: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB1_2 Depth 2
-; CHECK-NEXT: movslq %ebx, %r15
-; CHECK-NEXT: leaq (%rsi,%r15,4), %r15
-; CHECK-NEXT: xorl %r12d, %r12d
-; CHECK-NEXT: xorl %r13d, %r13d
+; CHECK-NEXT: movslq %r9d, %r11
+; CHECK-NEXT: leaq (%rsi,%r11,4), %r11
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: xorl %r14d, %r14d
; CHECK-NEXT: jmp .LBB1_2
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB1_4: # in Loop: Header=BB1_2 Depth=2
-; CHECK-NEXT: tilestored %tmm1, (%r15,%rax)
-; CHECK-NEXT: incq %r13
-; CHECK-NEXT: addq $64, %r15
-; CHECK-NEXT: decq %r12
+; CHECK-NEXT: tilestored %tmm1, (%r11,%rax)
+; CHECK-NEXT: incq %r14
+; CHECK-NEXT: addq $64, %r11
+; CHECK-NEXT: decq %rbx
; CHECK-NEXT: je .LBB1_5
; CHECK-NEXT: .LBB1_2: # Parent Loop BB1_1 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
@@ -110,46 +102,12 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind {
; CHECK-NEXT: testb %r8b, %r8b
; CHECK-NEXT: jne .LBB1_4
; CHECK-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=2
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: tileloadd (%r10,%r9), %tmm1
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: tileloadd (%r11,%r9), %tmm2
+; CHECK-NEXT: tilezero %tmm1
+; CHECK-NEXT: tilezero %tmm2
; CHECK-NEXT: tdpbf16ps %tmm2, %tmm1, %tmm0
-; CHECK-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movabsq $64, %rax
-; CHECK-NEXT: tilestored %tmm0, 3072(%rsp,%rax) # 1024-byte Folded Spill
-; CHECK-NEXT: tileloadd 3072(%rsp,%rax), %tmm1 # 1024-byte Folded Reload
-; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; CHECK-NEXT: movabsq $64, %rbp
+; CHECK-NEXT: tilestored %tmm0, 896(%rsp,%rbp) # 1024-byte Folded Spill
+; CHECK-NEXT: tileloadd 896(%rsp,%rbp), %tmm1 # 1024-byte Folded Reload
; CHECK-NEXT: jmp .LBB1_4
%4 = shl i32 %2, 4
%5 = icmp eq i64 0, 0
diff --git a/llvm/test/CodeGen/X86/combine-pack.ll b/llvm/test/CodeGen/X86/combine-pack.ll
new file mode 100644
index 0000000..2f5454d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-pack.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX
+
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @combine_packss_v4i32_signsplat(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_packss_v4i32_signsplat:
+; SSE: # %bb.0:
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_packss_v4i32_signsplat:
+; AVX: # %bb.0:
+; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cmp = icmp sgt <4 x i32> %a0, %a1
+ %ext = sext <4 x i1> %cmp to <4 x i32>
+ %pack = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %ext, <4 x i32> splat (i32 -1))
+ %signsplat = ashr <8 x i16> %pack, splat (i16 15)
+ ret <8 x i16> %signsplat
+}
+
+define <8 x i16> @combine_packss_v4i32_freeze_signsplat(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_packss_v4i32_freeze_signsplat:
+; SSE: # %bb.0:
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_packss_v4i32_freeze_signsplat:
+; AVX: # %bb.0:
+; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cmp = icmp sgt <4 x i32> %a0, %a1
+ %ext = sext <4 x i1> %cmp to <4 x i32>
+ %pack = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %ext, <4 x i32> splat (i32 -1))
+ %freeze = freeze <8 x i16> %pack
+ %signsplat = ashr <8 x i16> %freeze, splat (i16 15)
+ ret <8 x i16> %signsplat
+}
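Both tests check the same fold: every input lane to packssdw is the sign-extension of an i1, i.e. 0 or -1; packssdw saturates those i32 values to i16 0 or -1, so each output lane already equals its own sign-splat, and the trailing ashr by 15 folds away (even through the freeze in the second test, hence no psraw/vpsraw in the checked output). Lane-level arithmetic behind the fold:

;   lane = 0  : ashr i16 0, 15   = 0   = lane
;   lane = -1 : ashr i16 -1, 15  = -1  = lane
; packssdw saturates i32 0 / -1 to i16 0 / -1, so both reachable lane
; values are fixed points of the sign-splat shift, making it a no-op.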
diff --git a/llvm/test/CodeGen/X86/fshl.ll b/llvm/test/CodeGen/X86/fshl.ll
index ec1b8a3..f998128 100644
--- a/llvm/test/CodeGen/X86/fshl.ll
+++ b/llvm/test/CodeGen/X86/fshl.ll
@@ -335,84 +335,83 @@ define i128 @var_shift_i128(i128 %x, i128 %y, i128 %z) nounwind {
; X86-SLOW-NEXT: pushl %esi
; X86-SLOW-NEXT: andl $-16, %esp
; X86-SLOW-NEXT: subl $32, %esp
-; X86-SLOW-NEXT: movl 24(%ebp), %esi
+; X86-SLOW-NEXT: movl 24(%ebp), %edi
; X86-SLOW-NEXT: movl 28(%ebp), %eax
; X86-SLOW-NEXT: movl 48(%ebp), %edx
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: testb $64, %cl
-; X86-SLOW-NEXT: movl 52(%ebp), %edi
+; X86-SLOW-NEXT: movl 52(%ebp), %ebx
; X86-SLOW-NEXT: jne .LBB6_1
; X86-SLOW-NEXT: # %bb.2:
; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %esi, %edx
-; X86-SLOW-NEXT: movl 32(%ebp), %esi
-; X86-SLOW-NEXT: movl %edi, %ecx
-; X86-SLOW-NEXT: movl %eax, %edi
+; X86-SLOW-NEXT: movl %edi, %edx
+; X86-SLOW-NEXT: movl 32(%ebp), %edi
+; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %eax, %ebx
; X86-SLOW-NEXT: movl 36(%ebp), %eax
; X86-SLOW-NEXT: jmp .LBB6_3
; X86-SLOW-NEXT: .LBB6_1:
; X86-SLOW-NEXT: movl 40(%ebp), %ecx
; X86-SLOW-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: movl 44(%ebp), %ecx
+; X86-SLOW-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: .LBB6_3:
-; X86-SLOW-NEXT: movl 56(%ebp), %ebx
-; X86-SLOW-NEXT: testb $32, %bl
+; X86-SLOW-NEXT: movl 56(%ebp), %ecx
+; X86-SLOW-NEXT: testb $32, %cl
; X86-SLOW-NEXT: jne .LBB6_4
; X86-SLOW-NEXT: # %bb.5:
-; X86-SLOW-NEXT: movl %ecx, %ebx
; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %edx, %edi
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: jmp .LBB6_6
; X86-SLOW-NEXT: .LBB6_4:
-; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %ecx, %edx
-; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %edx, %ebx
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-SLOW-NEXT: .LBB6_6:
-; X86-SLOW-NEXT: movl %edx, %esi
+; X86-SLOW-NEXT: movl %edi, %eax
+; X86-SLOW-NEXT: shll %cl, %eax
+; X86-SLOW-NEXT: shrl %esi
+; X86-SLOW-NEXT: movl %ecx, %edx
+; X86-SLOW-NEXT: notb %dl
+; X86-SLOW-NEXT: movl %edx, %ecx
+; X86-SLOW-NEXT: shrl %cl, %esi
+; X86-SLOW-NEXT: orl %eax, %esi
+; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %ebx, %eax
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
-; X86-SLOW-NEXT: shll %cl, %esi
-; X86-SLOW-NEXT: movl %ebx, %edi
+; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SLOW-NEXT: shll %cl, %eax
; X86-SLOW-NEXT: shrl %edi
-; X86-SLOW-NEXT: movl %ecx, %ebx
-; X86-SLOW-NEXT: notb %bl
-; X86-SLOW-NEXT: movl %ebx, %ecx
-; X86-SLOW-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-SLOW-NEXT: movl %edx, %ecx
; X86-SLOW-NEXT: shrl %cl, %edi
-; X86-SLOW-NEXT: orl %esi, %edi
+; X86-SLOW-NEXT: orl %eax, %edi
; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-SLOW-NEXT: movl %esi, %eax
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-SLOW-NEXT: shll %cl, %eax
-; X86-SLOW-NEXT: shrl %edx
-; X86-SLOW-NEXT: movl %ebx, %ecx
-; X86-SLOW-NEXT: shrl %cl, %edx
-; X86-SLOW-NEXT: orl %eax, %edx
-; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-SLOW-NEXT: movl %ebx, %eax
+; X86-SLOW-NEXT: shrl %ebx
+; X86-SLOW-NEXT: movl %edx, %ecx
+; X86-SLOW-NEXT: shrl %cl, %ebx
+; X86-SLOW-NEXT: orl %eax, %ebx
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-SLOW-NEXT: shll %cl, %eax
; X86-SLOW-NEXT: shrl %esi
-; X86-SLOW-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-SLOW-NEXT: movl %edx, %ecx
; X86-SLOW-NEXT: shrl %cl, %esi
; X86-SLOW-NEXT: orl %eax, %esi
-; X86-SLOW-NEXT: movl 56(%ebp), %ecx
-; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-SLOW-NEXT: shll %cl, %eax
-; X86-SLOW-NEXT: shrl %ebx
-; X86-SLOW-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
-; X86-SLOW-NEXT: shrl %cl, %ebx
-; X86-SLOW-NEXT: orl %eax, %ebx
; X86-SLOW-NEXT: movl 8(%ebp), %eax
-; X86-SLOW-NEXT: movl %ebx, 12(%eax)
-; X86-SLOW-NEXT: movl %esi, 8(%eax)
-; X86-SLOW-NEXT: movl %edx, 4(%eax)
-; X86-SLOW-NEXT: movl %edi, (%eax)
+; X86-SLOW-NEXT: movl %esi, 12(%eax)
+; X86-SLOW-NEXT: movl %ebx, 8(%eax)
+; X86-SLOW-NEXT: movl %edi, 4(%eax)
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SLOW-NEXT: movl %ecx, (%eax)
; X86-SLOW-NEXT: leal -12(%ebp), %esp
; X86-SLOW-NEXT: popl %esi
; X86-SLOW-NEXT: popl %edi
diff --git a/llvm/test/CodeGen/X86/fshr.ll b/llvm/test/CodeGen/X86/fshr.ll
index 544ab7f..c307833 100644
--- a/llvm/test/CodeGen/X86/fshr.ll
+++ b/llvm/test/CodeGen/X86/fshr.ll
@@ -322,79 +322,79 @@ define i128 @var_shift_i128(i128 %x, i128 %y, i128 %z) nounwind {
; X86-SLOW-NEXT: subl $16, %esp
; X86-SLOW-NEXT: movl 24(%ebp), %edx
; X86-SLOW-NEXT: movl 28(%ebp), %esi
-; X86-SLOW-NEXT: movl 48(%ebp), %ebx
+; X86-SLOW-NEXT: movl 48(%ebp), %edi
; X86-SLOW-NEXT: movl 56(%ebp), %eax
; X86-SLOW-NEXT: testb $64, %al
-; X86-SLOW-NEXT: movl 52(%ebp), %edi
+; X86-SLOW-NEXT: movl 52(%ebp), %eax
; X86-SLOW-NEXT: je .LBB6_1
; X86-SLOW-NEXT: # %bb.2:
-; X86-SLOW-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-SLOW-NEXT: movl %edx, %ebx
+; X86-SLOW-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X86-SLOW-NEXT: movl %edx, %edi
; X86-SLOW-NEXT: movl 32(%ebp), %edx
-; X86-SLOW-NEXT: movl %edi, %eax
-; X86-SLOW-NEXT: movl %esi, %edi
+; X86-SLOW-NEXT: movl %eax, %ecx
+; X86-SLOW-NEXT: movl %esi, %eax
; X86-SLOW-NEXT: movl 36(%ebp), %esi
; X86-SLOW-NEXT: jmp .LBB6_3
; X86-SLOW-NEXT: .LBB6_1:
-; X86-SLOW-NEXT: movl 40(%ebp), %eax
-; X86-SLOW-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-SLOW-NEXT: movl 44(%ebp), %eax
+; X86-SLOW-NEXT: movl 40(%ebp), %ecx
+; X86-SLOW-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-SLOW-NEXT: movl 44(%ebp), %ecx
; X86-SLOW-NEXT: .LBB6_3:
-; X86-SLOW-NEXT: movl 56(%ebp), %ecx
-; X86-SLOW-NEXT: testb $32, %cl
+; X86-SLOW-NEXT: movl 56(%ebp), %ebx
+; X86-SLOW-NEXT: testb $32, %bl
; X86-SLOW-NEXT: je .LBB6_4
; X86-SLOW-NEXT: # %bb.5:
-; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %ecx, %ebx
; X86-SLOW-NEXT: jmp .LBB6_6
; X86-SLOW-NEXT: .LBB6_4:
; X86-SLOW-NEXT: movl %edx, %esi
+; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %eax, %ebx
-; X86-SLOW-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-SLOW-NEXT: movl %ecx, %edi
+; X86-SLOW-NEXT: movl (%esp), %ebx # 4-byte Reload
; X86-SLOW-NEXT: .LBB6_6:
-; X86-SLOW-NEXT: shrl %cl, %eax
-; X86-SLOW-NEXT: movl %eax, %edx
-; X86-SLOW-NEXT: movl %ecx, %eax
-; X86-SLOW-NEXT: notb %al
-; X86-SLOW-NEXT: movl %ebx, %edi
-; X86-SLOW-NEXT: addl %ebx, %ebx
-; X86-SLOW-NEXT: movl %eax, %ecx
-; X86-SLOW-NEXT: shll %cl, %ebx
-; X86-SLOW-NEXT: orl %edx, %ebx
-; X86-SLOW-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
-; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-SLOW-NEXT: shrl %cl, %edi
-; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-SLOW-NEXT: leal (%ebx,%ebx), %edx
-; X86-SLOW-NEXT: movl %eax, %ecx
-; X86-SLOW-NEXT: shll %cl, %edx
-; X86-SLOW-NEXT: orl %edi, %edx
+; X86-SLOW-NEXT: shrl %cl, %ebx
+; X86-SLOW-NEXT: movl %ecx, %edx
+; X86-SLOW-NEXT: notb %dl
+; X86-SLOW-NEXT: movl %edi, %eax
+; X86-SLOW-NEXT: addl %edi, %edi
+; X86-SLOW-NEXT: movl %edx, %ecx
+; X86-SLOW-NEXT: shll %cl, %edi
+; X86-SLOW-NEXT: orl %ebx, %edi
+; X86-SLOW-NEXT: movl %edi, (%esp) # 4-byte Spill
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-SLOW-NEXT: shrl %cl, %ebx
-; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: shrl %cl, %eax
; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-SLOW-NEXT: leal (%edi,%edi), %ebx
-; X86-SLOW-NEXT: movl %eax, %ecx
+; X86-SLOW-NEXT: movl %edx, %ecx
; X86-SLOW-NEXT: shll %cl, %ebx
-; X86-SLOW-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-SLOW-NEXT: orl %eax, %ebx
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-SLOW-NEXT: shrl %cl, %edi
+; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SLOW-NEXT: leal (%eax,%eax), %edi
+; X86-SLOW-NEXT: movl %edx, %ecx
+; X86-SLOW-NEXT: shll %cl, %edi
+; X86-SLOW-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-SLOW-NEXT: movl 56(%ebp), %ecx
+; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SLOW-NEXT: shrl %cl, %eax
; X86-SLOW-NEXT: addl %esi, %esi
-; X86-SLOW-NEXT: movl %eax, %ecx
+; X86-SLOW-NEXT: movl %edx, %ecx
; X86-SLOW-NEXT: shll %cl, %esi
-; X86-SLOW-NEXT: orl %edi, %esi
-; X86-SLOW-NEXT: movl 8(%ebp), %ecx
-; X86-SLOW-NEXT: movl %esi, 12(%ecx)
-; X86-SLOW-NEXT: movl %ebx, 8(%ecx)
-; X86-SLOW-NEXT: movl %edx, 4(%ecx)
-; X86-SLOW-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-SLOW-NEXT: movl %eax, (%ecx)
-; X86-SLOW-NEXT: movl %ecx, %eax
+; X86-SLOW-NEXT: orl %eax, %esi
+; X86-SLOW-NEXT: movl 8(%ebp), %eax
+; X86-SLOW-NEXT: movl %esi, 12(%eax)
+; X86-SLOW-NEXT: movl %edi, 8(%eax)
+; X86-SLOW-NEXT: movl %ebx, 4(%eax)
+; X86-SLOW-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SLOW-NEXT: movl %ecx, (%eax)
; X86-SLOW-NEXT: leal -12(%ebp), %esp
; X86-SLOW-NEXT: popl %esi
; X86-SLOW-NEXT: popl %edi
diff --git a/llvm/test/CodeGen/X86/pr161693.ll b/llvm/test/CodeGen/X86/pr161693.ll
new file mode 100644
index 0000000..de8188f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr161693.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
+
+define void @PR161693() #0 {
+; CHECK-LABEL: PR161693:
+; CHECK: # %bb.0: # %start
+; CHECK-NEXT: movzbl (%rax), %eax
+; CHECK-NEXT: andb $-33, %al
+; CHECK-NEXT: addb $-71, %al
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB0_1: # %loop
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmpb $-6, %al
+; CHECK-NEXT: setb %cl
+; CHECK-NEXT: leal (%rcx,%rcx), %edx
+; CHECK-NEXT: orb %cl, %dl
+; CHECK-NEXT: leal (,%rdx,4), %ecx
+; CHECK-NEXT: orb %dl, %cl
+; CHECK-NEXT: je .LBB0_1
+; CHECK-NEXT: # %bb.2: # %exit
+; CHECK-NEXT: retq
+start:
+ br label %loop
+
+loop:
+ %.val.i.i89 = load <16 x i8>, ptr poison, align 1
+ %.not49.i = icmp ult <16 x i8> zeroinitializer, splat (i8 -10)
+ %i = and <16 x i8> %.val.i.i89, splat (i8 -33)
+ %i1 = add <16 x i8> %i, splat (i8 -71)
+ %.not51.i = icmp ult <16 x i8> %i1, splat (i8 -6)
+ %.not46.i = and <16 x i1> %.not49.i, %.not51.i
+ %i2 = bitcast <16 x i1> %.not46.i to i16
+ %_0.i = icmp eq i16 %i2, 0
+ br i1 %_0.i, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+attributes #0 = { "target-features"="+soft-float" }
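
The regression test above reduces PR161693 to the "any lane matched" idiom: two per-lane unsigned compares are ANDed, the resulting <16 x i1> mask is bitcast to i16, and the loop exits once that scalar mask is non-zero. A minimal sketch of the idiom on its own, with a hypothetical function name that is not part of the committed test:

define i1 @any_lane_set(<16 x i8> %v) {
  ; Per-lane unsigned predicate.
  %cmp = icmp ult <16 x i8> %v, splat (i8 -6)
  ; Pack the 16 lane bits into a scalar mask.
  %mask = bitcast <16 x i1> %cmp to i16
  ; True iff at least one lane matched.
  %any = icmp ne i16 %mask, 0
  ret i1 %any
}

Under +soft-float no vector registers are available, so the mask has to be rebuilt from scalar byte ops, which is the lowering the CHECK lines pin down.
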
diff --git a/llvm/test/CodeGen/X86/sbb.ll b/llvm/test/CodeGen/X86/sbb.ll
index 78d609d..f5a3468 100644
--- a/llvm/test/CodeGen/X86/sbb.ll
+++ b/llvm/test/CodeGen/X86/sbb.ll
@@ -365,3 +365,32 @@ define i32 @uge_sext_add(i32 %0, i32 %1, i32 %2) {
%6 = add nsw i32 %5, %0
ret i32 %6
}
+
+define i32 @sub_sub_ugt(i32 %a, i32 %b) {
+; CHECK-LABEL: sub_sub_ugt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: cmpl %edi, %esi
+; CHECK-NEXT: sbbl %esi, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ugt i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ %sub = sub i32 %a, %b
+ %res = sub i32 %sub, %conv
+ ret i32 %res
+}
+
+define i32 @sub_sub_ult(i32 %a, i32 %b) {
+; CHECK-LABEL: sub_sub_ult:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: cmpl %edi, %esi
+; CHECK-NEXT: sbbl %esi, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ult i32 %b, %a
+ %conv = zext i1 %cmp to i32
+ %sub = sub i32 %a, %b
+ %res = sub i32 %sub, %conv
+ ret i32 %res
+}
+
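
The two additions above exercise one identity in both directions (ugt on (a, b) and ult on (b, a)): for unsigned values, the borrow out of b - a is exactly the predicate a >u b, so a - b - zext(a >u b) needs no separate setcc. In the expected code, cmpl %edi, %esi computes b - a and leaves the borrow in CF, and sbbl %esi, %eax then yields a - b - CF in one step. Worked example: a = 9, b = 5 gives a >u b = 1 and a - b - 1 = 3; the cmp borrows (5 - 9 wraps), so CF = 1 and the sbb produces 9 - 5 - 1 = 3.
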
diff --git a/llvm/test/CodeGen/X86/shift-i128.ll b/llvm/test/CodeGen/X86/shift-i128.ll
index 7462c77..049ee47 100644
--- a/llvm/test/CodeGen/X86/shift-i128.ll
+++ b/llvm/test/CodeGen/X86/shift-i128.ll
@@ -613,8 +613,7 @@ define void @test_shl_v2i128(<2 x i128> %x, <2 x i128> %a, ptr nocapture %r) nou
; i686-NEXT: shldl %cl, %esi, %ebx
; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; i686-NEXT: movl %edi, %esi
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; i686-NEXT: movl %eax, %ecx
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; i686-NEXT: shll %cl, %esi
; i686-NEXT: shldl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; i686-NEXT: negl %edx
diff --git a/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir b/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir
index 5d644c3..718fa6f 100644
--- a/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir
+++ b/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir
@@ -366,7 +366,8 @@ frameInfo:
maxCallFrameSize: 0
localFrameSize: 144
machineFunctionInfo:
- stackSizeSVE: 0
+ stackSizeZPR: 0
+ stackSizePPR: 0
stack:
- { id: 0, name: StackGuardSlot, offset: -40, size: 8, alignment: 8,
stack-id: default, local-offset: -8 }
diff --git a/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir b/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir
index 013d933..b7a9892 100644
--- a/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir
+++ b/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir
@@ -69,7 +69,8 @@ frameInfo:
hasCalls: true
maxCallFrameSize: 0
machineFunctionInfo:
- stackSizeSVE: 0
+ stackSizeZPR: 0
+ stackSizePPR: 0
stack:
- { id: 0, type: spill-slot, offset: -20, size: 4, alignment: 4, stack-id: default }
- { id: 1, type: spill-slot, offset: -8, size: 8, alignment: 8, stack-id: default,
diff --git a/llvm/test/DebugInfo/X86/dynamic-bitfield.ll b/llvm/test/DebugInfo/X86/dynamic-bitfield.ll
index c9148ca4..f893597 100644
--- a/llvm/test/DebugInfo/X86/dynamic-bitfield.ll
+++ b/llvm/test/DebugInfo/X86/dynamic-bitfield.ll
@@ -27,7 +27,7 @@ source_filename = "bitfield.c"
!6 = !{}
!7 = !{!0, !2}
!8 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "PackedBits", file: !5, line: 3, size: 40, elements: !9)
-!9 = !{!10, !12, !16}
+!9 = !{!10, !12, !16, !21}
!10 = !DIDerivedType(tag: DW_TAG_member, name: "a", scope: !8, file: !5, line: 5, baseType: !11, size: 8)
; CHECK: DW_TAG_member
; CHECK-NEXT: DW_AT_name{{.*}}"a"
@@ -60,5 +60,14 @@ source_filename = "bitfield.c"
; CHECK: DW_AT_bit_size [DW_FORM_exprloc] (DW_OP_lit27)
; CHECK-NEXT: DW_AT_data_bit_offset [DW_FORM_exprloc] (DW_OP_lit13)
; CHECK-NOT: DW_AT_data_member_location
-; CHECK: DW_TAG
!20 = !{!"clang version 3.9.0 (trunk 267633)"}
+!21 = !DIDerivedType(tag: DW_TAG_member, name: "d", scope: !8, file: !5, line: 7, baseType: !13, offset: !DIExpression(DW_OP_constu, 15), flags: DIFlagBitField)
+; CHECK: DW_TAG_member
+; CHECK-NEXT: DW_AT_name{{.*}}"d"
+; CHECK-NOT: DW_TAG
+; CHECK-NOT: DW_AT_bit_offset
+; CHECK-NOT: DW_AT_byte_size
+; CHECK-NOT: DW_AT_bit_size
+; CHECK: DW_AT_data_bit_offset [DW_FORM_exprloc] (DW_OP_lit15)
+; CHECK-NOT: DW_AT_data_member_location
+; CHECK: DW_TAG
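
The new member d above carries its bitfield offset as a DIExpression rather than a plain constant. The emitter folds DW_OP_constu, 15 into the one-byte literal form inside the DW_AT_data_bit_offset exprloc: DW_OP_lit0 is 0x30, so DW_OP_lit15 encodes as 0x30 + 15 = 0x3f. The expression form also emits no DW_AT_data_member_location, which is exactly what the added CHECK/CHECK-NOT lines verify.
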
diff --git a/llvm/test/DebugInfo/X86/x86fixupsetcc-debug-instr-num.mir b/llvm/test/DebugInfo/X86/x86fixupsetcc-debug-instr-num.mir
new file mode 100644
index 0000000..b7149f0
--- /dev/null
+++ b/llvm/test/DebugInfo/X86/x86fixupsetcc-debug-instr-num.mir
@@ -0,0 +1,54 @@
+# RUN: llc %s --run-pass=x86-fixup-setcc -o - | FileCheck %s
+
+## Check that the debug-instr-number transfers from MOVZX32rr8 to the SETCC
+## after the mov is replaced with an INSERT_SUBREG, updating the substitutions
+## table.
+
+# CHECK: debugValueSubstitutions:
+# CHECK: - { srcinst: 1, srcop: 0, dstinst: 2, dstop: 0, subreg: 0 }
+
+# CHECK: %[[#]]:gr8 = SETCCr 15, implicit $eflags, debug-instr-number 2
+# CHECK: INSERT_SUBREG
+# CHECK-NOT: debug-instr-number
+# CHECK-NEXT: DBG_INSTR_REF ![[#]], !DIExpression(DW_OP_LLVM_arg, 0), dbg-instr-ref(1, 0)
+
+--- |
+ source_filename = "reduced.ll"
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ define i32 @main(i32 %call2) {
+ entry:
+ %cmp12 = icmp sgt i32 %call2, 0
+ %conv13 = zext i1 %cmp12 to i32
+ #dbg_value(i32 %conv13, !4, !DIExpression(), !8)
+ ret i32 %conv13
+ }
+
+ !llvm.dbg.cu = !{!0}
+ !llvm.module.flags = !{!3}
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 22.0.0git", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2, splitDebugInlining: false, nameTableKind: None)
+ !1 = !DIFile(filename: "test.c", directory: "/")
+ !2 = !{}
+ !3 = !{i32 2, !"Debug Info Version", i32 3}
+ !4 = !DILocalVariable(name: "v_3", scope: !5, file: !1, line: 10, type: !7)
+ !5 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 5, type: !6, scopeLine: 6, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2, keyInstructions: true)
+ !6 = !DISubroutineType(types: !2)
+ !7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+ !8 = !DILocation(line: 0, scope: !5)
+...
+---
+name: main
+body: |
+ bb.0.entry:
+ liveins: $edi
+
+ %0:gr32 = COPY $edi
+ TEST32rr %0, %0, implicit-def $eflags
+ %1:gr8 = SETCCr 15, implicit $eflags
+ %2:gr32 = MOVZX32rr8 killed %1, debug-instr-number 1
+ DBG_INSTR_REF !4, !DIExpression(DW_OP_LLVM_arg, 0), dbg-instr-ref(1, 0), debug-location !8
+ $eax = COPY %2
+ RET 0, $eax
+...
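
The new MIR test above checks the bookkeeping rather than the codegen: once x86-fixup-setcc replaces the MOVZX32rr8 with an INSERT_SUBREG, the instruction carrying debug-instr-number 1 no longer exists, so the pass issues the SETCCr a fresh number and records a redirection instead of rewriting the DBG_INSTR_REF. Reading the expected substitution entry as a mapping (a sketch of the semantics, not additional test input):

  { srcinst: 1, srcop: 0, dstinst: 2, dstop: 0 }
  ; any dbg-instr-ref(1, 0) now resolves to operand 0 of the
  ; instruction numbered 2, i.e. the SETCCr result

so the variable v_3 keeps tracking the zext'd compare without the reference in the DBG_INSTR_REF itself changing.
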
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s b/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s
index 9296f04..ed76a28 100644
--- a/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s
+++ b/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s
@@ -22,7 +22,7 @@
# CHECK-OBJ: Contents of section .rodata:
# CHECK-OBJ: 0000 48310048 32004833 00 H1.H2.H3.
-# CHECK-LG: Starting link phase 1 for graph
+# CHECK-LG: Starting link phase 1
# CHECK-LG: section .rodata:
# CHECK-LG: block 0x0 size = 0x00000009, align = 1, alignment-offset = 0
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s
new file mode 100644
index 0000000..557e403
--- /dev/null
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s
@@ -0,0 +1,7 @@
+ .section __DATA,__data
+ .globl x
+ .p2align 2, 0x0
+x:
+ .long 0
+
+.subsections_via_symbols
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s
new file mode 100644
index 0000000..711c8a0
--- /dev/null
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s
@@ -0,0 +1,7 @@
+ .section __DATA,__data
+ .globl x
+ .p2align 2, 0x0
+x:
+ .long 1
+
+.subsections_via_symbols
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s
new file mode 100644
index 0000000..c58f84e
--- /dev/null
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s
@@ -0,0 +1,32 @@
+# RUN: rm -rf %t && mkdir -p %t
+# RUN: llvm-mc -triple=arm64e-apple-darwin -filetype=obj -o %t/main.o %s
+# RUN: llvm-mc -triple=arm64-apple-darwin -filetype=obj -o %t/x.arm64.o \
+# RUN: %S/Inputs/x-1.s
+# RUN: llvm-ar crs %t/libX.arm64.a %t/x.arm64.o
+# RUN: llvm-mc -triple=arm64e-apple-darwin -filetype=obj -o %t/x.arm64e.o \
+# RUN: %S/Inputs/x-0.s
+# RUN: llvm-ar crs %t/libX.arm64e.a %t/x.arm64e.o
+# RUN: llvm-lipo --create --output %t/libX.a %t/libX.arm64.a %t/libX.arm64e.a
+# RUN: llvm-jitlink -noexec -check=%s %t/main.o -L%t -lX
+#
+# Create a universal archive with two slices (arm64e, arm64), each containing
+# a definition of x: in the arm64e slice x = 0, in the arm64 slice x = 1.
+# Check that when we load an arm64e object file we link against the arm64e
+# slice of the archive, by verifying that x = 0.
+#
+
+# jitlink-check: *{4}x = 0
+
+ .section __TEXT,__text,regular,pure_instructions
+ .globl _main
+ .p2align 2
+_main:
+ mov w0, #0
+ ret
+
+ .section __DATA,__data
+ .globl p
+p:
+ .quad x
+
+.subsections_via_symbols
diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s
index 2b5c9e3..5f6babf 100644
--- a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s
+++ b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s
@@ -102,7 +102,7 @@ p:
call o
.size p, .-p
-# CHECK: Link graph "{{.*}}" before copy-and-fixup:
+# CHECK: Link graph before copy-and-fixup:
# CHECK: section .text:
# CHECK: block 0x1000
# CHECK: symbols:
diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s
index 3bbfd55..c31250b 100644
--- a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s
+++ b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s
@@ -131,7 +131,7 @@ p:
call o
.size p, .-p
-# CHECK: Link graph "{{.*}}" before copy-and-fixup:
+# CHECK: Link graph before copy-and-fixup:
# CHECK: section .text:
# CHECK: block 0x1000
# CHECK: symbols:
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll
index 99e9ab9..864f6a9 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll
@@ -877,7 +877,7 @@ define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, ptr
; CHECK-LABEL: define %struct.__neon_int8x16x2_t @ld2lane_16b(
; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -904,8 +904,8 @@ define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16
; CHECK-LABEL: define %struct.__neon_int8x16x3_t @ld3lane_16b(
; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], <16 x i8> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -936,9 +936,9 @@ define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16
; CHECK-LABEL: define %struct.__neon_int8x16x4_t @ld4lane_16b(
; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], <16 x i8> [[L3:%.*]], <16 x i8> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -977,7 +977,7 @@ define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, ptr
; CHECK-LABEL: define %struct.__neon_int16x8x2_t @ld2lane_8h(
; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1004,8 +1004,8 @@ define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x
; CHECK-LABEL: define %struct.__neon_int16x8x3_t @ld3lane_8h(
; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], <8 x i16> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1036,9 +1036,9 @@ define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x
; CHECK-LABEL: define %struct.__neon_int16x8x4_t @ld4lane_8h(
; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], <8 x i16> [[L3:%.*]], <8 x i16> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1077,7 +1077,7 @@ define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, ptr
; CHECK-LABEL: define %struct.__neon_int32x4x2_t @ld2lane_4s(
; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1104,8 +1104,8 @@ define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x
; CHECK-LABEL: define %struct.__neon_int32x4x3_t @ld3lane_4s(
; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], <4 x i32> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1136,9 +1136,9 @@ define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x
; CHECK-LABEL: define %struct.__neon_int32x4x4_t @ld4lane_4s(
; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], <4 x i32> [[L3:%.*]], <4 x i32> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1177,7 +1177,7 @@ define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, ptr
; CHECK-LABEL: define %struct.__neon_int64x2x2_t @ld2lane_2d(
; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1204,8 +1204,8 @@ define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x
; CHECK-LABEL: define %struct.__neon_int64x2x3_t @ld3lane_2d(
; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], <2 x i64> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1236,9 +1236,9 @@ define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x
; CHECK-LABEL: define %struct.__neon_int64x2x4_t @ld4lane_2d(
; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], <2 x i64> [[L3:%.*]], <2 x i64> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -2304,7 +2304,7 @@ define <16 x i8> @ld1_16b(<16 x i8> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <16 x i8> @ld1_16b(
; CHECK-SAME: <16 x i8> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2332,7 +2332,7 @@ define <8 x i16> @ld1_8h(<8 x i16> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <8 x i16> @ld1_8h(
; CHECK-SAME: <8 x i16> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2360,7 +2360,7 @@ define <4 x i32> @ld1_4s(<4 x i32> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <4 x i32> @ld1_4s(
; CHECK-SAME: <4 x i32> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2388,7 +2388,7 @@ define <4 x float> @ld1_4s_float(<4 x float> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <4 x float> @ld1_4s_float(
; CHECK-SAME: <4 x float> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2416,7 +2416,7 @@ define <2 x i64> @ld1_2d(<2 x i64> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <2 x i64> @ld1_2d(
; CHECK-SAME: <2 x i64> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2444,7 +2444,7 @@ define <2 x double> @ld1_2d_double(<2 x double> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <2 x double> @ld1_2d_double(
; CHECK-SAME: <2 x double> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2496,7 +2496,7 @@ define <8 x i8> @ld1_8b(<8 x i8> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <8 x i8> @ld1_8b(
; CHECK-SAME: <8 x i8> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2524,7 +2524,7 @@ define <4 x i16> @ld1_4h(<4 x i16> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <4 x i16> @ld1_4h(
; CHECK-SAME: <4 x i16> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2552,7 +2552,7 @@ define <2 x i32> @ld1_2s(<2 x i32> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <2 x i32> @ld1_2s(
; CHECK-SAME: <2 x i32> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2580,7 +2580,7 @@ define <2 x float> @ld1_2s_float(<2 x float> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <2 x float> @ld1_2s_float(
; CHECK-SAME: <2 x float> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2611,8 +2611,8 @@ define void @ld1r_2s_from_dup(ptr nocapture %a, ptr nocapture %b, ptr nocapture
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]], ptr captures(none) [[DIFF:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP0]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
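
Every hunk in this file is the same mechanical change: the shadow for the i-th argument lives at a fixed byte offset into __msan_param_tls, and the constant address that used to be spelled as a ptrtoint/add/inttoptr round-trip is now printed as an equivalent getelementptr over i8. A minimal sketch of the two spellings side by side (the global's element type and size here are assumptions for illustration, and the function names are hypothetical):

@__msan_param_tls = external thread_local global [100 x i64]

define i64 @arg1_shadow_old() {
  ; Old spelling: integer round-trip to add 16 bytes.
  %s = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
  ret i64 %s
}

define i64 @arg1_shadow_new() {
  ; New spelling: byte-wise gep over i8, same address.
  %s = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
  ret i64 %s
}

Both constants denote __msan_param_tls + 16; the gep form is the canonical one and keeps pointer provenance explicit.
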
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll
index 632268e..1319544 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll
@@ -122,7 +122,7 @@ define <8 x i8> @test_vmaxv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 {
; CHECK-LABEL: define <8 x i8> @test_vmaxv_s8_used_by_laneop(
; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]])
@@ -146,7 +146,7 @@ define <4 x i16> @test_vmaxv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0
; CHECK-LABEL: define <4 x i16> @test_vmaxv_s16_used_by_laneop(
; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]])
@@ -170,7 +170,7 @@ define <2 x i32> @test_vmaxv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0
; CHECK-LABEL: define <2 x i32> @test_vmaxv_s32_used_by_laneop(
; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
@@ -190,7 +190,7 @@ define <16 x i8> @test_vmaxvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0
; CHECK-LABEL: define <16 x i8> @test_vmaxvq_s8_used_by_laneop(
; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]])
@@ -214,7 +214,7 @@ define <8 x i16> @test_vmaxvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) #
; CHECK-LABEL: define <8 x i16> @test_vmaxvq_s16_used_by_laneop(
; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]])
@@ -238,7 +238,7 @@ define <4 x i32> @test_vmaxvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) #
; CHECK-LABEL: define <4 x i32> @test_vmaxvq_s32_used_by_laneop(
; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll
index 2670610..272a910f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll
@@ -122,7 +122,7 @@ define <8 x i8> @test_vminv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 {
; CHECK-LABEL: define <8 x i8> @test_vminv_s8_used_by_laneop(
; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]])
@@ -146,7 +146,7 @@ define <4 x i16> @test_vminv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0
; CHECK-LABEL: define <4 x i16> @test_vminv_s16_used_by_laneop(
; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]])
@@ -170,7 +170,7 @@ define <2 x i32> @test_vminv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0
; CHECK-LABEL: define <2 x i32> @test_vminv_s32_used_by_laneop(
; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
@@ -190,7 +190,7 @@ define <16 x i8> @test_vminvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0
; CHECK-LABEL: define <16 x i8> @test_vminvq_s8_used_by_laneop(
; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]])
@@ -214,7 +214,7 @@ define <8 x i16> @test_vminvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) #
; CHECK-LABEL: define <8 x i16> @test_vminvq_s16_used_by_laneop(
; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]])
@@ -238,7 +238,7 @@ define <4 x i32> @test_vminvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) #
; CHECK-LABEL: define <4 x i32> @test_vminvq_s32_used_by_laneop(
; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll
index deeb1d4..fedf45f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll
@@ -15,9 +15,9 @@ define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
;
; CHECK-LABEL: define void @st2_8b(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -41,8 +41,8 @@ define void @st2_8b_undefA(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
;
; CHECK-LABEL: define void @st2_8b_undefA(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -66,7 +66,7 @@ define void @st2_8b_undefB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
;
; CHECK-LABEL: define void @st2_8b_undefB(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
@@ -91,7 +91,7 @@ define void @st2_8b_undefAB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_
;
; CHECK-LABEL: define void @st2_8b_undefAB(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
@@ -115,10 +115,10 @@ define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sani
;
; CHECK-LABEL: define void @st3_8b(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -142,9 +142,9 @@ define void @st3_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
;
; CHECK-LABEL: define void @st3_8b_undefA(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -168,9 +168,9 @@ define void @st3_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
;
; CHECK-LABEL: define void @st3_8b_undefB(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -194,9 +194,9 @@ define void @st3_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
;
; CHECK-LABEL: define void @st3_8b_undefC(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -220,8 +220,8 @@ define void @st3_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
;
; CHECK-LABEL: define void @st3_8b_undefAB(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -245,8 +245,8 @@ define void @st3_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
;
; CHECK-LABEL: define void @st3_8b_undefAC(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -270,7 +270,7 @@ define void @st3_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
;
; CHECK-LABEL: define void @st3_8b_undefBC(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
@@ -295,7 +295,7 @@ define void @st3_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) noun
;
; CHECK-LABEL: define void @st3_8b_undefABC(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
@@ -319,11 +319,11 @@ define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P)
;
; CHECK-LABEL: define void @st4_8b(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
@@ -347,10 +347,10 @@ define void @st4_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
;
; CHECK-LABEL: define void @st4_8b_undefA(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -374,10 +374,10 @@ define void @st4_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
;
; CHECK-LABEL: define void @st4_8b_undefB(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -401,10 +401,10 @@ define void @st4_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
;
; CHECK-LABEL: define void @st4_8b_undefC(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -428,10 +428,10 @@ define void @st4_8b_undefD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
;
; CHECK-LABEL: define void @st4_8b_undefD(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -455,9 +455,9 @@ define void @st4_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
;
; CHECK-LABEL: define void @st4_8b_undefAB(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -481,9 +481,9 @@ define void @st4_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
;
; CHECK-LABEL: define void @st4_8b_undefAC(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -507,9 +507,9 @@ define void @st4_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
;
; CHECK-LABEL: define void @st4_8b_undefBC(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -533,9 +533,9 @@ define void @st4_8b_undefBD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
;
; CHECK-LABEL: define void @st4_8b_undefBD(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -559,8 +559,8 @@ define void @st4_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
;
; CHECK-LABEL: define void @st4_8b_undefABC(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -584,8 +584,8 @@ define void @st4_8b_undefABD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
;
; CHECK-LABEL: define void @st4_8b_undefABD(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -609,8 +609,8 @@ define void @st4_8b_undefACD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
;
; CHECK-LABEL: define void @st4_8b_undefACD(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -634,7 +634,7 @@ define void @st4_8b_undefBCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
;
; CHECK-LABEL: define void @st4_8b_undefBCD(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
@@ -659,7 +659,7 @@ define void @st4_8b_undefABCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D
;
; CHECK-LABEL: define void @st4_8b_undefABCD(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
@@ -689,9 +689,9 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor
;
; CHECK-LABEL: define void @st2_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -715,10 +715,10 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind
;
; CHECK-LABEL: define void @st3_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -742,11 +742,11 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr
;
; CHECK-LABEL: define void @st4_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
@@ -776,9 +776,9 @@ define void @st2_4h(<4 x i16> %A, <4 x i16> %B, ptr %P) nounwind sanitize_memory
;
; CHECK-LABEL: define void @st2_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -802,10 +802,10 @@ define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind s
;
; CHECK-LABEL: define void @st3_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -829,11 +829,11 @@ define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr
;
; CHECK-LABEL: define void @st4_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], <4 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
@@ -863,9 +863,9 @@ define void @st2_8h(<8 x i16> %A, <8 x i16> %B, ptr %P) nounwind sanitize_memory
;
; CHECK-LABEL: define void @st2_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -889,10 +889,10 @@ define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind s
;
; CHECK-LABEL: define void @st3_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -916,11 +916,11 @@ define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr
;
; CHECK-LABEL: define void @st4_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
@@ -950,9 +950,9 @@ define void @st2_2s(<2 x i32> %A, <2 x i32> %B, ptr %P) nounwind sanitize_memory
;
; CHECK-LABEL: define void @st2_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -976,10 +976,10 @@ define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind s
;
; CHECK-LABEL: define void @st3_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1003,11 +1003,11 @@ define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr
;
; CHECK-LABEL: define void @st4_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
@@ -1035,9 +1035,9 @@ define void @st2_4s(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind sanitize_memory
;
; CHECK-LABEL: define void @st2_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1061,10 +1061,10 @@ define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind s
;
; CHECK-LABEL: define void @st3_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1088,11 +1088,11 @@ define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr
;
; CHECK-LABEL: define void @st4_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
@@ -1123,9 +1123,9 @@ define void @st2_1d(<1 x i64> %A, <1 x i64> %B, ptr %P) nounwind sanitize_memory
;
; CHECK-LABEL: define void @st2_1d(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1149,10 +1149,10 @@ define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind s
;
; CHECK-LABEL: define void @st3_1d(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1176,11 +1176,11 @@ define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr
;
; CHECK-LABEL: define void @st4_1d(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
@@ -1210,9 +1210,9 @@ define void @st2_2d(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory
;
; CHECK-LABEL: define void @st2_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1236,8 +1236,8 @@ define void @st2_2d_undefA(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
;
; CHECK-LABEL: define void @st2_2d_undefA(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1261,7 +1261,7 @@ define void @st2_2d_undefB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
;
; CHECK-LABEL: define void @st2_2d_undefB(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
@@ -1286,7 +1286,7 @@ define void @st2_2d_undefAB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitiz
;
; CHECK-LABEL: define void @st2_2d_undefAB(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
@@ -1310,10 +1310,10 @@ define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind s
;
; CHECK-LABEL: define void @st3_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1337,9 +1337,9 @@ define void @st3_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
;
; CHECK-LABEL: define void @st3_2d_undefA(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1363,9 +1363,9 @@ define void @st3_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
;
; CHECK-LABEL: define void @st3_2d_undefB(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1389,9 +1389,9 @@ define void @st3_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
;
; CHECK-LABEL: define void @st3_2d_undefC(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1415,8 +1415,8 @@ define void @st3_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
;
; CHECK-LABEL: define void @st3_2d_undefAB(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1440,8 +1440,8 @@ define void @st3_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
;
; CHECK-LABEL: define void @st3_2d_undefAC(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1465,7 +1465,7 @@ define void @st3_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
;
; CHECK-LABEL: define void @st3_2d_undefBC(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
@@ -1490,7 +1490,7 @@ define void @st3_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) n
;
; CHECK-LABEL: define void @st3_2d_undefABC(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
@@ -1514,11 +1514,11 @@ define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr
;
; CHECK-LABEL: define void @st4_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
@@ -1546,10 +1546,10 @@ define void @st4_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
;
; CHECK-LABEL: define void @st4_2d_undefA(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1573,10 +1573,10 @@ define void @st4_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
;
; CHECK-LABEL: define void @st4_2d_undefB(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1600,10 +1600,10 @@ define void @st4_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
;
; CHECK-LABEL: define void @st4_2d_undefC(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1627,10 +1627,10 @@ define void @st4_2d_undefD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
;
; CHECK-LABEL: define void @st4_2d_undefD(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1654,9 +1654,9 @@ define void @st4_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefAB(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1680,9 +1680,9 @@ define void @st4_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefAC(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1706,9 +1706,9 @@ define void @st4_2d_undefAD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefAD(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1732,9 +1732,9 @@ define void @st4_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefBC(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1758,9 +1758,9 @@ define void @st4_2d_undefBD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefBD(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1784,9 +1784,9 @@ define void @st4_2d_undefCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefCD(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1810,8 +1810,8 @@ define void @st4_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefABC(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1835,8 +1835,8 @@ define void @st4_2d_undefABD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefABD(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1860,8 +1860,8 @@ define void @st4_2d_undefACD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefACD(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1885,7 +1885,7 @@ define void @st4_2d_undefBCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
;
; CHECK-LABEL: define void @st4_2d_undefBCD(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
@@ -1910,7 +1910,7 @@ define void @st4_2d_undefABCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64
;
; CHECK-LABEL: define void @st4_2d_undefABCD(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
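Every hunk above and below makes the same mechanical substitution: shadow loads from the MSan parameter TLS block are now matched as getelementptr constant expressions rather than as inttoptr/ptrtoint integer arithmetic. A minimal LLVM IR sketch of the equivalence the updated CHECK lines encode (illustrative only; the global's declared type and the wrapper functions are assumptions, not taken from these tests):

; Both loads below read the shadow word at byte offset 16 inside
; @__msan_param_tls; only the spelling of the constant pointer differs.
; NOTE: declaration shape is an assumption for this sketch.
@__msan_param_tls = external thread_local global [100 x i64]

define i64 @shadow_old_form() {
  ; Old CHECK pattern: address built with integer arithmetic, then cast back.
  %s = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
  ret i64 %s
}

define i64 @shadow_new_form() {
  ; New CHECK pattern: the same address as a byte-offset GEP over i8.
  %s = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
  ret i64 %s
}

The two constants denote the same address, but the GEP form stays in the pointer domain. Recent LLVM releases have been shrinking the set of supported integer constant expressions, which is plausibly (not confirmed by this diff) why the instrumentation, and hence these expectations, moved to the getelementptr spelling.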
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll
index 9ed364d..0617c8c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll
@@ -13,7 +13,7 @@ target triple = "aarch64--linux-android9001"
define void @st1lane_16b(<16 x i8> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -21,7 +21,7 @@ define void @st1lane_16b(<16 x i8> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
; CHECK-NEXT: unreachable
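Independently of the pointer-expression change, every branch into the MSan warning path in this file now refers to [[PROF1]] instead of [[PROF0]]; the renumbering suggests another metadata node now precedes the branch-weight profile in the emitted module (an inference from the diff, not stated in it). A hedged sketch of the construct these lines match (node number and weights below are placeholders, not values from this commit):

; The shadow check branches to the noreturn warning block with "unlikely"
; branch weights; in the diff only the !N index of this node shifted.
declare void @__msan_warning_noreturn()

define void @check(i64 %shadow) {
  %cmp = icmp ne i64 %shadow, 0
  br i1 %cmp, label %warn, label %cont, !prof !1
warn:
  call void @__msan_warning_noreturn()
  unreachable
cont:
  ret void
}

!1 = !{!"branch_weights", i32 1, i32 1048575}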
@@ -42,7 +42,7 @@ define void @st1lane_16b(<16 x i8> %A, ptr %D) sanitize_memory {
define void @st1lane0_16b(<16 x i8> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -50,7 +50,7 @@ define void @st1lane0_16b(<16 x i8> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -71,7 +71,7 @@ define void @st1lane0_16b(<16 x i8> %A, ptr %D) sanitize_memory {
define void @st1lane0u_16b(<16 x i8> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -79,7 +79,7 @@ define void @st1lane0u_16b(<16 x i8> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -100,8 +100,8 @@ define void @st1lane0u_16b(<16 x i8> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -109,7 +109,7 @@ define void @st1lane_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -130,8 +130,8 @@ define void @st1lane_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane0_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -139,7 +139,7 @@ define void @st1lane0_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -160,7 +160,7 @@ define void @st1lane0_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory
define void @st1lane_8h(<8 x i16> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -168,7 +168,7 @@ define void @st1lane_8h(<8 x i16> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -189,7 +189,7 @@ define void @st1lane_8h(<8 x i16> %A, ptr %D) sanitize_memory {
define void @st1lane0_8h(<8 x i16> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -197,7 +197,7 @@ define void @st1lane0_8h(<8 x i16> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -218,7 +218,7 @@ define void @st1lane0_8h(<8 x i16> %A, ptr %D) sanitize_memory {
define void @st1lane0u_8h(<8 x i16> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -226,7 +226,7 @@ define void @st1lane0u_8h(<8 x i16> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -247,8 +247,8 @@ define void @st1lane0u_8h(<8 x i16> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -256,7 +256,7 @@ define void @st1lane_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -277,8 +277,8 @@ define void @st1lane_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane0_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -286,7 +286,7 @@ define void @st1lane0_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -307,7 +307,7 @@ define void @st1lane0_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane_4s(<4 x i32> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -315,7 +315,7 @@ define void @st1lane_4s(<4 x i32> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -336,7 +336,7 @@ define void @st1lane_4s(<4 x i32> %A, ptr %D) sanitize_memory {
define void @st1lane0_4s(<4 x i32> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -344,7 +344,7 @@ define void @st1lane0_4s(<4 x i32> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -365,7 +365,7 @@ define void @st1lane0_4s(<4 x i32> %A, ptr %D) sanitize_memory {
define void @st1lane0u_4s(<4 x i32> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -373,7 +373,7 @@ define void @st1lane0u_4s(<4 x i32> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -394,8 +394,8 @@ define void @st1lane0u_4s(<4 x i32> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -403,7 +403,7 @@ define void @st1lane_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -424,8 +424,8 @@ define void @st1lane_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane0_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -433,7 +433,7 @@ define void @st1lane0_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -454,7 +454,7 @@ define void @st1lane0_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_4s_float(
; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -462,7 +462,7 @@ define void @st1lane_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -483,7 +483,7 @@ define void @st1lane_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
define void @st1lane0_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_4s_float(
; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -491,7 +491,7 @@ define void @st1lane0_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -512,7 +512,7 @@ define void @st1lane0_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
define void @st1lane0u_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_4s_float(
; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -520,7 +520,7 @@ define void @st1lane0u_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -541,8 +541,8 @@ define void @st1lane0u_4s_float(<4 x float> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_4s_float(
; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -550,7 +550,7 @@ define void @st1lane_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_m
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -571,8 +571,8 @@ define void @st1lane_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_m
define void @st1lane0_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_4s_float(
; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -580,7 +580,7 @@ define void @st1lane0_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -601,7 +601,7 @@ define void @st1lane0_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_
define void @st1lane_2d(<2 x i64> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -609,7 +609,7 @@ define void @st1lane_2d(<2 x i64> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -630,7 +630,7 @@ define void @st1lane_2d(<2 x i64> %A, ptr %D) sanitize_memory {
define void @st1lane0_2d(<2 x i64> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -638,7 +638,7 @@ define void @st1lane0_2d(<2 x i64> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -659,7 +659,7 @@ define void @st1lane0_2d(<2 x i64> %A, ptr %D) sanitize_memory {
define void @st1lane0u_2d(<2 x i64> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -667,7 +667,7 @@ define void @st1lane0u_2d(<2 x i64> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -688,8 +688,8 @@ define void @st1lane0u_2d(<2 x i64> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -697,7 +697,7 @@ define void @st1lane_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -718,8 +718,8 @@ define void @st1lane_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane0_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -727,7 +727,7 @@ define void @st1lane0_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -748,7 +748,7 @@ define void @st1lane0_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_2d_double(
; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -756,7 +756,7 @@ define void @st1lane_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -777,7 +777,7 @@ define void @st1lane_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
define void @st1lane0_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_2d_double(
; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -785,7 +785,7 @@ define void @st1lane0_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -806,7 +806,7 @@ define void @st1lane0_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
define void @st1lane0u_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_2d_double(
; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -814,7 +814,7 @@ define void @st1lane0u_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -835,8 +835,8 @@ define void @st1lane0u_2d_double(<2 x double> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_2d_double(
; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -844,7 +844,7 @@ define void @st1lane_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitize
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -865,8 +865,8 @@ define void @st1lane_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitize
define void @st1lane0_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_2d_double(
; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -874,7 +874,7 @@ define void @st1lane0_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitiz
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -895,7 +895,7 @@ define void @st1lane0_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitiz
define void @st1lane_8b(<8 x i8> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_8b(
; CHECK-SAME: <8 x i8> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -903,7 +903,7 @@ define void @st1lane_8b(<8 x i8> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i8> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i8> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -924,8 +924,8 @@ define void @st1lane_8b(<8 x i8> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_8b(
; CHECK-SAME: <8 x i8> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -933,7 +933,7 @@ define void @st1lane_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i8> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i8> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -954,8 +954,8 @@ define void @st1lane_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane0_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_8b(
; CHECK-SAME: <8 x i8> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -963,7 +963,7 @@ define void @st1lane0_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i8> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i8> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -984,7 +984,7 @@ define void @st1lane0_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane_4h(<4 x i16> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -992,7 +992,7 @@ define void @st1lane_4h(<4 x i16> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1013,7 +1013,7 @@ define void @st1lane_4h(<4 x i16> %A, ptr %D) sanitize_memory {
define void @st1lane0_4h(<4 x i16> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1021,7 +1021,7 @@ define void @st1lane0_4h(<4 x i16> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1042,7 +1042,7 @@ define void @st1lane0_4h(<4 x i16> %A, ptr %D) sanitize_memory {
define void @st1lane0u_4h(<4 x i16> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1050,7 +1050,7 @@ define void @st1lane0u_4h(<4 x i16> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1071,8 +1071,8 @@ define void @st1lane0u_4h(<4 x i16> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -1080,7 +1080,7 @@ define void @st1lane_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1101,8 +1101,8 @@ define void @st1lane_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane0_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -1110,7 +1110,7 @@ define void @st1lane0_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1131,7 +1131,7 @@ define void @st1lane0_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane_2s(<2 x i32> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1139,7 +1139,7 @@ define void @st1lane_2s(<2 x i32> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1160,7 +1160,7 @@ define void @st1lane_2s(<2 x i32> %A, ptr %D) sanitize_memory {
define void @st1lane0_2s(<2 x i32> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1168,7 +1168,7 @@ define void @st1lane0_2s(<2 x i32> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1189,7 +1189,7 @@ define void @st1lane0_2s(<2 x i32> %A, ptr %D) sanitize_memory {
define void @st1lane0u_2s(<2 x i32> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1197,7 +1197,7 @@ define void @st1lane0u_2s(<2 x i32> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1218,8 +1218,8 @@ define void @st1lane0u_2s(<2 x i32> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -1227,7 +1227,7 @@ define void @st1lane_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1248,8 +1248,8 @@ define void @st1lane_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane0_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -1257,7 +1257,7 @@ define void @st1lane0_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1278,7 +1278,7 @@ define void @st1lane0_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane_2s_float(
; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1286,7 +1286,7 @@ define void @st1lane_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1307,7 +1307,7 @@ define void @st1lane_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
define void @st1lane0_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_2s_float(
; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1315,7 +1315,7 @@ define void @st1lane0_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1336,7 +1336,7 @@ define void @st1lane0_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
define void @st1lane0u_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_2s_float(
; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1344,7 +1344,7 @@ define void @st1lane0u_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1365,8 +1365,8 @@ define void @st1lane0u_2s_float(<2 x float> %A, ptr %D) sanitize_memory {
define void @st1lane_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane_ro_2s_float(
; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -1374,7 +1374,7 @@ define void @st1lane_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_m
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1395,8 +1395,8 @@ define void @st1lane_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_m
define void @st1lane0_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_2s_float(
; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -1404,7 +1404,7 @@ define void @st1lane0_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1425,7 +1425,7 @@ define void @st1lane0_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_
define void @st1lane0_1d(<1 x i64> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_1d(
; CHECK-SAME: <1 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1433,7 +1433,7 @@ define void @st1lane0_1d(<1 x i64> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x i64> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1454,7 +1454,7 @@ define void @st1lane0_1d(<1 x i64> %A, ptr %D) sanitize_memory {
define void @st1lane0u_1d(<1 x i64> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_1d(
; CHECK-SAME: <1 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1462,7 +1462,7 @@ define void @st1lane0u_1d(<1 x i64> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x i64> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1483,8 +1483,8 @@ define void @st1lane0u_1d(<1 x i64> %A, ptr %D) sanitize_memory {
define void @st1lane0_ro_1d(<1 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_1d(
; CHECK-SAME: <1 x i64> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -1492,7 +1492,7 @@ define void @st1lane0_ro_1d(<1 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x i64> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1513,7 +1513,7 @@ define void @st1lane0_ro_1d(<1 x i64> %A, ptr %D, i64 %offset) sanitize_memory {
define void @st1lane0_1d_double(<1 x double> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_1d_double(
; CHECK-SAME: <1 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1521,7 +1521,7 @@ define void @st1lane0_1d_double(<1 x double> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x double> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1542,7 +1542,7 @@ define void @st1lane0_1d_double(<1 x double> %A, ptr %D) sanitize_memory {
define void @st1lane0u_1d_double(<1 x double> %A, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st1lane0u_1d_double(
; CHECK-SAME: <1 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0
@@ -1550,7 +1550,7 @@ define void @st1lane0u_1d_double(<1 x double> %A, ptr %D) sanitize_memory {
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x double> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1571,8 +1571,8 @@ define void @st1lane0u_1d_double(<1 x double> %A, ptr %D) sanitize_memory {
define void @st1lane0_ro_1d_double(<1 x double> %A, ptr %D, i64 %offset) sanitize_memory {
; CHECK-LABEL: define void @st1lane0_ro_1d_double(
; CHECK-SAME: <1 x double> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
@@ -1580,7 +1580,7 @@ define void @st1lane0_ro_1d_double(<1 x double> %A, ptr %D, i64 %offset) sanitiz
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP3]], i32 0
; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x double> [[A]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1601,16 +1601,16 @@ define void @st1lane0_ro_1d_double(<1 x double> %A, ptr %D, i64 %offset) sanitiz
define void @st2lane_16b(<16 x i8> %A, <16 x i8> %B, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st2lane_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[D]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2lane.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], i64 1, ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1625,16 +1625,16 @@ define void @st2lane_16b(<16 x i8> %A, <16 x i8> %B, ptr %D) sanitize_memory {
define void @st2lane_8h(<8 x i16> %A, <8 x i16> %B, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st2lane_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[D]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2lane.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], i64 1, ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1649,16 +1649,16 @@ define void @st2lane_8h(<8 x i16> %A, <8 x i16> %B, ptr %D) sanitize_memory {
define void @st2lane_4s(<4 x i32> %A, <4 x i32> %B, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st2lane_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[D]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2lane.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], i64 1, ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1673,16 +1673,16 @@ define void @st2lane_4s(<4 x i32> %A, <4 x i32> %B, ptr %D) sanitize_memory {
define void @st2lane_2d(<2 x i64> %A, <2 x i64> %B, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st2lane_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[D]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], i64 1, ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1702,17 +1702,17 @@ declare void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64>, <2 x i64>, i64, ptr)
define void @st3lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st3lane_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[D]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3lane.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], i64 1, ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1727,17 +1727,17 @@ define void @st3lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %D) sanit
define void @st3lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st3lane_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[D]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3lane.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], i64 1, ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1752,17 +1752,17 @@ define void @st3lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %D) saniti
define void @st3lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st3lane_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[D]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3lane.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], i64 1, ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1777,17 +1777,17 @@ define void @st3lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %D) saniti
define void @st3lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %D) sanitize_memory {
; CHECK-LABEL: define void @st3lane_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[D]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], i64 1, ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1807,18 +1807,18 @@ declare void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>
define void @st4lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %E) sanitize_memory {
; CHECK-LABEL: define void @st4lane_16b(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[E:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[E]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4lane.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], i64 1, ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1833,18 +1833,18 @@ define void @st4lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D,
define void @st4lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %E) sanitize_memory {
; CHECK-LABEL: define void @st4lane_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[E:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[E]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], i64 1, ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1859,18 +1859,18 @@ define void @st4lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D,
define void @st4lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %E) sanitize_memory {
; CHECK-LABEL: define void @st4lane_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[E:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[E]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], i64 1, ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1885,18 +1885,18 @@ define void @st4lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D,
define void @st4lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %E) sanitize_memory {
; CHECK-LABEL: define void @st4lane_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[E:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[E]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], i64 1, ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1913,5 +1913,5 @@ declare void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>
declare void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, ptr) nounwind readnone
declare void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, ptr) nounwind readnone
;.
-; CHECK: [[PROF0]] = !{!"branch_weights", i32 1, i32 1048575}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
;.
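
The rewrite in the hunks above is mechanical: every parameter-shadow slot that was spelled as an inttoptr/add/ptrtoint constant expression is now a plain byte-offset getelementptr off @__msan_param_tls, and the branch_weights metadata check is renumbered from [[PROF0]] to [[PROF1]]. A minimal standalone sketch of the two equivalent addressing forms follows; the external declaration and the function name are assumptions made up for illustration (the real TLS array is defined by the MSan runtime), not part of the patch:

; Sketch only: the declaration below is an assumption for illustration.
; Both constant expressions address the same byte: the shadow slot of the
; second argument, 8 bytes into the parameter TLS block.
@__msan_param_tls = external thread_local global [100 x i64]

define i64 @param1_shadow_both_forms() {
  ; Old spelling: the pointer is round-tripped through integer arithmetic.
  %old = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
  ; New spelling: an i8 getelementptr with the same byte offset.
  %new = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
  %sum = add i64 %old, %new
  ret i64 %sum
}
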
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll
index 5228381..a121df9 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll
@@ -17,12 +17,12 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor
;
; CHECK-LABEL: define void @st2_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
@@ -50,7 +50,7 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP12]], i32 7
; CHECK-NEXT: store i32 [[TMP15]], ptr [[TMP22]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP23:%.*]], label [[TMP24:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP23:%.*]], label [[TMP24:%.*]], !prof [[PROF1:![0-9]+]]
; CHECK: 23:
; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: unreachable
@@ -67,14 +67,14 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind
;
; CHECK-LABEL: define void @st3_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 48), align 4
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
@@ -113,7 +113,7 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i32, ptr [[TMP14]], i32 11
; CHECK-NEXT: store i32 [[TMP20]], ptr [[TMP31]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP32:%.*]], label [[TMP33:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP32:%.*]], label [[TMP33:%.*]], !prof [[PROF1]]
; CHECK: 32:
; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -130,16 +130,16 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr
;
; CHECK-LABEL: define void @st4_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 64), align 4
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
-; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 48), align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 193514046488576
@@ -189,7 +189,7 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr
; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i32, ptr [[TMP16]], i32 15
; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP40]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP41:%.*]], label [[TMP42:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP41:%.*]], label [[TMP42:%.*]], !prof [[PROF1]]
; CHECK: 41:
; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
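
The origins variant above adds one wrinkle: each shadow load from @__msan_param_tls is paired with a 4-byte origin load from @__msan_param_origin_tls at the same byte offset, and both now use the getelementptr spelling. The store's shadow address is still derived by XORing the application pointer with 193514046488576 (0xB00000000000, the XOR mask these AArch64 tests use to map an application address to its shadow). A short sketch of the paired loads, under the same caveat as before — the external declarations, array sizes, and function name are assumptions for illustration only:

; Sketch only: declarations are assumptions, not taken from the patch.
@__msan_param_tls = external thread_local global [100 x i64]
@__msan_param_origin_tls = external thread_local global [200 x i32]

define i32 @param_shadow_origin_pair() {
  ; Shadow and origin slots for the same argument share a byte offset (32
  ; here); only the element width and alignment differ (i64/8 vs. i32/4).
  %shadow = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
  %origin = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
  ret i32 %origin
}
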
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll
index b0c71dc..3d6e7fa 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build/bin/opt --version 2
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; Test memory sanitizer instrumentation for Arm NEON tbl instructions.
;
; RUN: opt < %s -passes=msan -S | FileCheck %s
@@ -14,7 +14,7 @@ define <8 x i8> @tbl1_8b(<16 x i8> %A, <8 x i8> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @tbl1_8b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <8 x i8> [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[TMP1]], <8 x i8> [[B]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP2]], [[TMP3]]
@@ -30,7 +30,7 @@ define <16 x i8> @tbl1_16b(<16 x i8> %A, <16 x i8> %B) nounwind sanitize_memory
; CHECK-LABEL: define <16 x i8> @tbl1_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[B]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP2]], [[TMP3]]
@@ -46,8 +46,8 @@ define <8 x i8> @tbl2_8b(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C) sanitize_memor
; CHECK-LABEL: define <8 x i8> @tbl2_8b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <8 x i8> [[C]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP3]], [[TMP4]]
@@ -63,8 +63,8 @@ define <16 x i8> @tbl2_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) sanitize_me
; CHECK-LABEL: define <16 x i8> @tbl2_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[C]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP3]], [[TMP4]]
@@ -80,9 +80,9 @@ define <8 x i8> @tbl3_8b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i8> %D)
; CHECK-LABEL: define <8 x i8> @tbl3_8b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <8 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <8 x i8> [[D]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP4]], [[TMP5]]
@@ -98,9 +98,9 @@ define <16 x i8> @tbl3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @tbl3_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[D]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]]
@@ -116,10 +116,10 @@ define <8 x i8> @tbl4_8b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D,
; CHECK-LABEL: define <8 x i8> @tbl4_8b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <8 x i8> [[E:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <8 x i8> [[E]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP5]], [[TMP6]]
@@ -135,10 +135,10 @@ define <16 x i8> @tbl4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @tbl4_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <16 x i8> [[E:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[E]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP5]], [[TMP6]]
@@ -156,9 +156,9 @@ define <8 x i8> @shuffled_tbl2_to_tbl4_v8i8(<16 x i8> %a, <16 x i8> %b, <16 x i8
; CHECK-LABEL: define <8 x i8> @shuffled_tbl2_to_tbl4_v8i8
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <8 x i8> <i8 0, i8 4, i8 8, i8 12, i8 -1, i8 -1, i8 -1, i8 -1>)
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> zeroinitializer, [[TMP5]]
@@ -183,9 +183,9 @@ define <16 x i8> @shuffled_tbl2_to_tbl4(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c
; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> zeroinitializer, [[TMP5]]
@@ -208,11 +208,11 @@ define <16 x i8> @shuffled_tbl2_to_tbl4(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c
define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) sanitize_memory {
; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], i8 [[V:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i8> splat (i8 -1), i8 [[TMP1]], i32 0
; CHECK-NEXT: [[INS_0:%.*]] = insertelement <16 x i8> poison, i8 [[V]], i32 0
@@ -283,11 +283,11 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask(<16 x i8> %a, <16 x
define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask2(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) sanitize_memory {
; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask2
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], i8 [[V:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[INS_0:%.*]] = insertelement <16 x i8> poison, i8 1, i32 0
; CHECK-NEXT: [[INS_1:%.*]] = insertelement <16 x i8> [[INS_0]], i8 1, i32 1
@@ -347,11 +347,11 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask2(<16 x i8> %a, <16 x
define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) sanitize_memory {
; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], i8 [[V:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i8> splat (i8 -1), i8 [[TMP1]], i32 0
; CHECK-NEXT: [[INS_0:%.*]] = insertelement <16 x i8> poison, i8 [[V]], i32 0
@@ -423,11 +423,11 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask(<16 x i8> %a, <16 x
define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask2(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) sanitize_memory {
; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask2
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], i8 [[V:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i8> splat (i8 -1), i8 [[TMP1]], i32 0
; CHECK-NEXT: [[INS_0:%.*]] = insertelement <16 x i8> poison, i8 [[V]], i32 0
@@ -500,9 +500,9 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_shuffle(<16 x i8> %a, <16 x i8> %b
; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_shuffle
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> zeroinitializer, [[TMP5]]
@@ -527,9 +527,9 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_tbl2_mask1(<16 x i8> %a, <16 x i8>
; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_tbl2_mask1
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 0, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> zeroinitializer, [[TMP5]]
@@ -554,9 +554,9 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_tbl2_mask2(<16 x i8> %a, <16 x i8>
; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_tbl2_mask2
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> zeroinitializer, [[TMP5]]
@@ -588,8 +588,8 @@ define <8 x i8> @tbx1_8b(<8 x i8> %A, <16 x i8> %B, <8 x i8> %C) nounwind saniti
; CHECK-LABEL: define <8 x i8> @tbx1_8b
; CHECK-SAME: (<8 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> [[TMP1]], <16 x i8> [[TMP2]], <8 x i8> [[C]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP3]], [[TMP4]]
@@ -605,8 +605,8 @@ define <16 x i8> @tbx1_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) nounwind sa
; CHECK-LABEL: define <16 x i8> @tbx1_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[C]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP3]], [[TMP4]]
@@ -622,9 +622,9 @@ define <8 x i8> @tbx2_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i8> %D) s
; CHECK-LABEL: define <8 x i8> @tbx2_8b
; CHECK-SAME: (<8 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <8 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx2.v8i8(<8 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <8 x i8> [[D]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP4]], [[TMP5]]
@@ -640,9 +640,9 @@ define <16 x i8> @tbx2_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @tbx2_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[D]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]]
@@ -658,10 +658,10 @@ define <8 x i8> @tbx3_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D,
; CHECK-LABEL: define <8 x i8> @tbx3_8b
; CHECK-SAME: (<8 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <8 x i8> [[E:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx3.v8i8(<8 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <8 x i8> [[E]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP5]], [[TMP6]]
@@ -677,10 +677,10 @@ define <16 x i8> @tbx3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @tbx3_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <16 x i8> [[E:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx3.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[E]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP5]], [[TMP6]]
@@ -696,11 +696,11 @@ define <8 x i8> @tbx4_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D,
; CHECK-LABEL: define <8 x i8> @tbx4_8b
; CHECK-SAME: (<8 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <16 x i8> [[E:%.*]], <8 x i8> [[F:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx4.v8i8(<8 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], <8 x i8> [[F]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP6]], [[TMP7]]
@@ -716,11 +716,11 @@ define <16 x i8> @tbx4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @tbx4_16b
; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <16 x i8> [[E:%.*]], <16 x i8> [[F:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx4.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], <16 x i8> [[F]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP6]], [[TMP7]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll
index 95f11a0..7f42139 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll
@@ -216,7 +216,7 @@ define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 {
; CHECK-LABEL: define <8 x i8> @test_vmaxv_u8_used_by_laneop(
; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]])
@@ -240,7 +240,7 @@ define <4 x i16> @test_vmaxv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0
; CHECK-LABEL: define <4 x i16> @test_vmaxv_u16_used_by_laneop(
; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]])
@@ -264,7 +264,7 @@ define <2 x i32> @test_vmaxv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0
; CHECK-LABEL: define <2 x i32> @test_vmaxv_u32_used_by_laneop(
; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
@@ -284,7 +284,7 @@ define <16 x i8> @test_vmaxvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0
; CHECK-LABEL: define <16 x i8> @test_vmaxvq_u8_used_by_laneop(
; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]])
@@ -308,7 +308,7 @@ define <8 x i16> @test_vmaxvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) #
; CHECK-LABEL: define <8 x i16> @test_vmaxvq_u16_used_by_laneop(
; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]])
@@ -332,7 +332,7 @@ define <4 x i32> @test_vmaxvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) #
; CHECK-LABEL: define <4 x i32> @test_vmaxvq_u32_used_by_laneop(
; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]])
@@ -356,3 +356,6 @@ declare i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32>) nounwind readnone
declare i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32>) nounwind readnone
attributes #0 = { sanitize_memory }
+;.
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll
index ad513956..441c21b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll
@@ -216,7 +216,7 @@ define <8 x i8> @test_vminv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 {
; CHECK-LABEL: define <8 x i8> @test_vminv_u8_used_by_laneop(
; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]])
@@ -240,7 +240,7 @@ define <4 x i16> @test_vminv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0
; CHECK-LABEL: define <4 x i16> @test_vminv_u16_used_by_laneop(
; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]])
@@ -264,7 +264,7 @@ define <2 x i32> @test_vminv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0
; CHECK-LABEL: define <2 x i32> @test_vminv_u32_used_by_laneop(
; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
@@ -284,7 +284,7 @@ define <16 x i8> @test_vminvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0
; CHECK-LABEL: define <16 x i8> @test_vminvq_u8_used_by_laneop(
; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]])
@@ -308,7 +308,7 @@ define <8 x i16> @test_vminvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) #
; CHECK-LABEL: define <8 x i16> @test_vminvq_u16_used_by_laneop(
; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]])
@@ -332,7 +332,7 @@ define <4 x i32> @test_vminvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) #
; CHECK-LABEL: define <4 x i32> @test_vminvq_u32_used_by_laneop(
; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]])
@@ -355,3 +355,6 @@ declare i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32>) nounwind readnone
declare i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32>) nounwind readnone
attributes #0 = { sanitize_memory }
+;.
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll
index ad0856d..5338031 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll
@@ -17,7 +17,7 @@ define <8 x i8> @addhn8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @addhn8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
@@ -65,7 +65,7 @@ define <4 x i16> @addhn4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @addhn4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -113,7 +113,7 @@ define <2 x i32> @addhn2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @addhn2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -161,7 +161,7 @@ define <16 x i8> @addhn2_16b(<8 x i16> %a, <8 x i16> %b) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @addhn2_16b(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -199,7 +199,7 @@ define <8 x i16> @addhn2_8h(<4 x i32> %a, <4 x i32> %b) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @addhn2_8h(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -237,7 +237,7 @@ define <4 x i32> @addhn2_4s(<2 x i64> %a, <2 x i64> %b) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @addhn2_4s(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -280,7 +280,7 @@ define <8 x i8> @raddhn8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @raddhn8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -328,7 +328,7 @@ define <4 x i16> @raddhn4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @raddhn4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -376,7 +376,7 @@ define <2 x i32> @raddhn2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @raddhn2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -424,7 +424,7 @@ define <16 x i8> @raddhn2_16b(<8 x i16> %a, <8 x i16> %b) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @raddhn2_16b(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -462,7 +462,7 @@ define <8 x i16> @raddhn2_8h(<4 x i32> %a, <4 x i32> %b) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @raddhn2_8h(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -500,7 +500,7 @@ define <4 x i32> @raddhn2_4s(<2 x i64> %a, <2 x i64> %b) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @raddhn2_4s(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -542,7 +542,7 @@ define <8 x i16> @saddl8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @saddl8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -587,7 +587,7 @@ define <4 x i32> @saddl4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @saddl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -632,7 +632,7 @@ define <2 x i64> @saddl2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @saddl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -677,9 +677,9 @@ define <8 x i16> @saddl2_8h(<16 x i8> %a, <16 x i8> %b, <2 x i64> %param1, <2 x
; CHECK-LABEL: define <8 x i16> @saddl2_8h(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <16 x i8> [[A]] to <2 x i64>
@@ -718,9 +718,9 @@ define <4 x i32> @saddl2_4s(<8 x i16> %a, <8 x i16> %b, <2 x i64> %param1, <2 x
; CHECK-LABEL: define <4 x i32> @saddl2_4s(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <8 x i16> [[A]] to <2 x i64>
@@ -759,9 +759,9 @@ define <2 x i64> @saddl2_2d(<4 x i32> %a, <4 x i32> %b, <2 x i64> %param1, <2 x
; CHECK-LABEL: define <2 x i64> @saddl2_2d(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <4 x i32> [[A]] to <2 x i64>
@@ -800,7 +800,7 @@ define <8 x i16> @uaddl8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @uaddl8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -845,7 +845,7 @@ define <4 x i32> @uaddl4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @uaddl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -890,7 +890,7 @@ define <2 x i64> @uaddl2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @uaddl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -936,9 +936,9 @@ define <8 x i16> @uaddl2_8h(<16 x i8> %a, <16 x i8> %b, <2 x i64> %param1, <2 x
; CHECK-LABEL: define <8 x i16> @uaddl2_8h(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <16 x i8> [[A]] to <2 x i64>
@@ -977,9 +977,9 @@ define <4 x i32> @uaddl2_4s(<8 x i16> %a, <8 x i16> %b, <2 x i64> %param1, <2 x
; CHECK-LABEL: define <4 x i32> @uaddl2_4s(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <8 x i16> [[A]] to <2 x i64>
@@ -1018,9 +1018,9 @@ define <2 x i64> @uaddl2_2d(<4 x i32> %a, <4 x i32> %b, <2 x i64> %param1, <2 x
; CHECK-LABEL: define <2 x i64> @uaddl2_2d(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <4 x i32> [[A]] to <2 x i64>
@@ -1059,7 +1059,7 @@ define <8 x i16> @uaddw8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @uaddw8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -1101,7 +1101,7 @@ define <4 x i32> @uaddw4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @uaddw4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -1143,7 +1143,7 @@ define <2 x i64> @uaddw2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @uaddw2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -1185,8 +1185,8 @@ define <8 x i16> @uaddw2_8h(ptr %A, ptr %B, <16 x i8> %param1) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @uaddw2_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <16 x i8> [[PARAM1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
@@ -1233,8 +1233,8 @@ define <4 x i32> @uaddw2_4s(ptr %A, ptr %B, <8 x i16> %param1) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @uaddw2_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <8 x i16> [[PARAM1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
@@ -1281,8 +1281,8 @@ define <2 x i64> @uaddw2_2d(ptr %A, ptr %B, <4 x i32> %param1) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @uaddw2_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <4 x i32> [[PARAM1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
@@ -1329,7 +1329,7 @@ define <8 x i16> @saddw8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @saddw8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -1371,7 +1371,7 @@ define <4 x i32> @saddw4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @saddw4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -1413,7 +1413,7 @@ define <2 x i64> @saddw2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @saddw2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -1455,8 +1455,8 @@ define <8 x i16> @saddw2_8h(ptr %A, ptr %B, <16 x i8> %param1) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @saddw2_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <16 x i8> [[PARAM1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
@@ -1503,8 +1503,8 @@ define <4 x i32> @saddw2_4s(ptr %A, ptr %B, <8 x i16> %param1) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @saddw2_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <8 x i16> [[PARAM1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
@@ -1551,8 +1551,8 @@ define <2 x i64> @saddw2_2d(ptr %A, ptr %B, <4 x i32> %param1) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @saddw2_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <4 x i32> [[PARAM1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
@@ -1963,7 +1963,7 @@ define <4 x i16> @sadalp4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @sadalp4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2008,7 +2008,7 @@ define <2 x i32> @sadalp2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @sadalp2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2053,7 +2053,7 @@ define <8 x i16> @sadalp8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @sadalp8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2098,7 +2098,7 @@ define <4 x i32> @sadalp4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @sadalp4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2143,7 +2143,7 @@ define <2 x i64> @sadalp2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @sadalp2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2188,7 +2188,7 @@ define <4 x i16> @uadalp4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @uadalp4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2233,7 +2233,7 @@ define <2 x i32> @uadalp2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @uadalp2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2278,7 +2278,7 @@ define <8 x i16> @uadalp8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @uadalp8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2323,7 +2323,7 @@ define <4 x i32> @uadalp4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @uadalp4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2368,7 +2368,7 @@ define <2 x i64> @uadalp2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @uadalp2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
@@ -2413,7 +2413,7 @@ define <8 x i8> @addp_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @addp_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2454,7 +2454,7 @@ define <16 x i8> @addp_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @addp_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2495,7 +2495,7 @@ define <4 x i16> @addp_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @addp_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2536,7 +2536,7 @@ define <8 x i16> @addp_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @addp_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2577,7 +2577,7 @@ define <2 x i32> @addp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @addp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2618,7 +2618,7 @@ define <4 x i32> @addp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @addp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2659,7 +2659,7 @@ define <2 x i64> @addp_2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i64> @addp_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2708,7 +2708,7 @@ define <2 x float> @faddp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x float> @faddp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2749,7 +2749,7 @@ define <4 x float> @faddp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x float> @faddp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2790,7 +2790,7 @@ define <2 x double> @faddp_2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x double> @faddp_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2834,10 +2834,10 @@ declare <2 x double> @llvm.aarch64.neon.faddp.v2f64(<2 x double>, <2 x double>)
define <2 x i64> @uaddl_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 {
; CHECK-LABEL: define <2 x i64> @uaddl_duprhs(
; CHECK-SAME: <4 x i32> [[LHS:%.*]], i32 [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2:[0-9]+]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0
; CHECK-NEXT: [[RHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[RHS]], i32 0
@@ -2869,10 +2869,10 @@ define <2 x i64> @uaddl_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x
define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 {
; CHECK-LABEL: define <2 x i64> @uaddl2_duprhs(
; CHECK-SAME: <4 x i32> [[LHS:%.*]], i32 [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0
; CHECK-NEXT: [[RHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[RHS]], i32 0
@@ -2904,10 +2904,10 @@ define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4
define <2 x i64> @saddl_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 {
; CHECK-LABEL: define <2 x i64> @saddl_duplhs(
; CHECK-SAME: i32 [[LHS:%.*]], <4 x i32> [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0
; CHECK-NEXT: [[LHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[LHS]], i32 0
@@ -2939,10 +2939,10 @@ define <2 x i64> @saddl_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x
define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 {
; CHECK-LABEL: define <2 x i64> @saddl2_duplhs(
; CHECK-SAME: i32 [[LHS:%.*]], <4 x i32> [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0
; CHECK-NEXT: [[LHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[LHS]], i32 0
@@ -2974,10 +2974,10 @@ define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4
define <2 x i64> @usubl_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 {
; CHECK-LABEL: define <2 x i64> @usubl_duprhs(
; CHECK-SAME: <4 x i32> [[LHS:%.*]], i32 [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0
; CHECK-NEXT: [[RHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[RHS]], i32 0
@@ -3009,10 +3009,10 @@ define <2 x i64> @usubl_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x
define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 {
; CHECK-LABEL: define <2 x i64> @usubl2_duprhs(
; CHECK-SAME: <4 x i32> [[LHS:%.*]], i32 [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0
; CHECK-NEXT: [[RHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[RHS]], i32 0
@@ -3044,10 +3044,10 @@ define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4
define <2 x i64> @ssubl_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 {
; CHECK-LABEL: define <2 x i64> @ssubl_duplhs(
; CHECK-SAME: i32 [[LHS:%.*]], <4 x i32> [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0
; CHECK-NEXT: [[LHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[LHS]], i32 0
@@ -3079,10 +3079,10 @@ define <2 x i64> @ssubl_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x
define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 {
; CHECK-LABEL: define <2 x i64> @ssubl2_duplhs(
; CHECK-SAME: i32 [[LHS:%.*]], <4 x i32> [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0
; CHECK-NEXT: [[LHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[LHS]], i32 0
@@ -3115,7 +3115,7 @@ define <8 x i8> @addhn8b_natural(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @addhn8b_natural(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3161,7 +3161,7 @@ define <4 x i16> @addhn4h_natural(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @addhn4h_natural(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3207,7 +3207,7 @@ define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @addhn2s_natural(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3252,8 +3252,8 @@ define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind #0 {
define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @addhn2_16b_natural(
; CHECK-SAME: <8 x i8> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3302,8 +3302,8 @@ define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind #0
define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @addhn2_8h_natural(
; CHECK-SAME: <4 x i16> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3352,8 +3352,8 @@ define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind #0
define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @addhn2_4s_natural(
; CHECK-SAME: <2 x i32> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3403,9 +3403,9 @@ define <4 x i32> @addhn_addhn2_4s(ptr %A, ptr %B, ptr %C, ptr %D) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @addhn_addhn2_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -3488,7 +3488,7 @@ define <8 x i8> @subhn8b_natural(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @subhn8b_natural(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3534,7 +3534,7 @@ define <4 x i16> @subhn4h_natural(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @subhn4h_natural(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3580,7 +3580,7 @@ define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @subhn2s_natural(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3625,8 +3625,8 @@ define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind #0 {
define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @subhn2_16b_natural(
; CHECK-SAME: <8 x i8> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3675,8 +3675,8 @@ define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind #0
define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @subhn2_8h_natural(
; CHECK-SAME: <4 x i16> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3725,8 +3725,8 @@ define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind #0
define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @subhn2_4s_natural(
; CHECK-SAME: <2 x i32> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll
index 3a2ecfe..4ee7e4f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll
@@ -30,7 +30,7 @@ define <8 x i8> @test_vaddv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 {
; CHECK-LABEL: define <8 x i8> @test_vaddv_s8_used_by_laneop(
; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]])
@@ -74,7 +74,7 @@ define <4 x i16> @test_vaddv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0
; CHECK-LABEL: define <4 x i16> @test_vaddv_s16_used_by_laneop(
; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]])
@@ -115,7 +115,7 @@ define <2 x i32> @test_vaddv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0
; CHECK-LABEL: define <2 x i32> @test_vaddv_s32_used_by_laneop(
; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
@@ -151,7 +151,7 @@ define <2 x i64> @test_vaddv_s64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) #0
; CHECK-LABEL: define <2 x i64> @test_vaddv_s64_used_by_laneop(
; CHECK-SAME: <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP0]])
@@ -191,7 +191,7 @@ define <8 x i8> @test_vaddv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 {
; CHECK-LABEL: define <8 x i8> @test_vaddv_u8_used_by_laneop(
; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]])
@@ -259,7 +259,7 @@ define <4 x i16> @test_vaddv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0
; CHECK-LABEL: define <4 x i16> @test_vaddv_u16_used_by_laneop(
; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]])
@@ -324,7 +324,7 @@ define <2 x i32> @test_vaddv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0
; CHECK-LABEL: define <2 x i32> @test_vaddv_u32_used_by_laneop(
; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
@@ -408,7 +408,7 @@ define <2 x i64> @test_vaddv_u64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) #0
; CHECK-LABEL: define <2 x i64> @test_vaddv_u64_used_by_laneop(
; CHECK-SAME: <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP0]])
@@ -429,7 +429,7 @@ define <1 x i64> @test_vaddv_u64_to_vec(<2 x i64> %a1, <1 x i64> %param1) #0 {
; CHECK-SAME: <2 x i64> [[A1:%.*]], <1 x i64> [[PARAM1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP0]])
; CHECK-NEXT: [[VADDV_I:%.*]] = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> [[A1]])
@@ -468,7 +468,7 @@ define <16 x i8> @test_vaddvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0
; CHECK-LABEL: define <16 x i8> @test_vaddvq_s8_used_by_laneop(
; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]])
@@ -512,7 +512,7 @@ define <8 x i16> @test_vaddvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) #
; CHECK-LABEL: define <8 x i16> @test_vaddvq_s16_used_by_laneop(
; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]])
@@ -552,7 +552,7 @@ define <4 x i32> @test_vaddvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) #
; CHECK-LABEL: define <4 x i32> @test_vaddvq_s32_used_by_laneop(
; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]])
@@ -592,7 +592,7 @@ define <16 x i8> @test_vaddvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0
; CHECK-LABEL: define <16 x i8> @test_vaddvq_u8_used_by_laneop(
; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]])
@@ -636,7 +636,7 @@ define <8 x i16> @test_vaddvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) #
; CHECK-LABEL: define <8 x i16> @test_vaddvq_u16_used_by_laneop(
; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]])
@@ -676,7 +676,7 @@ define <4 x i32> @test_vaddvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) #
; CHECK-LABEL: define <4 x i32> @test_vaddvq_u32_used_by_laneop(
; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll
index 93a75df..03f6113 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll
@@ -1083,7 +1083,7 @@ define <2 x float> @fcvtxn_2s(<2 x double> %A) nounwind #0 {
define <4 x float> @fcvtxn_4s(<2 x float> %ret, <2 x double> %A) nounwind #0 {
; CHECK-LABEL: define <4 x float> @fcvtxn_4s(
; CHECK-SAME: <2 x float> [[RET:%.*]], <2 x double> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP1]], zeroinitializer
@@ -1358,7 +1358,7 @@ define void @autogen_SD28458(<8 x double> %val.f64, ptr %addr.f32) #0 {
; CHECK-LABEL: define void @autogen_SD28458(
; CHECK-SAME: <8 x double> [[VAL_F64:%.*]], ptr [[ADDR_F32:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
; CHECK-NEXT: [[TR53:%.*]] = fptrunc <8 x double> [[VAL_F64]] to <8 x float>
@@ -1383,7 +1383,7 @@ define void @autogen_SD28458(<8 x double> %val.f64, ptr %addr.f32) #0 {
define void @autogen_SD19225(ptr %addr.f64, ptr %addr.f32) #0 {
; CHECK-LABEL: define void @autogen_SD19225(
; CHECK-SAME: ptr [[ADDR_F64:%.*]], ptr [[ADDR_F32:%.*]]) #[[ATTR3]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
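The hunks above all make the same mechanical substitution: the regenerated CHECK lines stop matching an integer-domain constant expression (ptrtoint the TLS base, add the byte offset, inttoptr back to a pointer) and instead expect an equivalent byte-wise getelementptr into @__msan_param_tls, the thread-local array MSan uses to pass argument shadows. Below is a minimal, self-contained sketch of the new spelling; the function name is invented, and the [100 x i64] declaration mirrors the 800-byte TLS block the instrumentation normally declares, so treat both as assumptions rather than lines from this patch.

; --- illustrative sketch, not part of the diff ---
; The shadow of a function's second scalar argument lives 8 bytes past
; the start of the parameter-shadow TLS array. The old CHECK lines
; matched the same address spelled as
;   inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr)
@__msan_param_tls = external thread_local(initialexec) global [100 x i64]

define i64 @second_param_shadow() {
  %shadow = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
  ret i64 %shadow
}

Keeping the offset in the pointer domain avoids the int/ptr round-trip, which is presumably why the pass was switched to emit getelementptr; the test updates here only record that change in their expected output.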
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll
index e2457c0..d6d8895 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll
@@ -29,7 +29,7 @@ define <8 x i8> @smax_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @smax_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]]
@@ -68,7 +68,7 @@ define <16 x i8> @smax_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @smax_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -107,7 +107,7 @@ define <4 x i16> @smax_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @smax_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -146,7 +146,7 @@ define <8 x i16> @smax_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @smax_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -185,7 +185,7 @@ define <2 x i32> @smax_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @smax_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -224,7 +224,7 @@ define <4 x i32> @smax_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @smax_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -270,7 +270,7 @@ define <8 x i8> @umax_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @umax_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -309,7 +309,7 @@ define <16 x i8> @umax_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @umax_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -348,7 +348,7 @@ define <4 x i16> @umax_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @umax_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -387,7 +387,7 @@ define <8 x i16> @umax_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @umax_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -426,7 +426,7 @@ define <2 x i32> @umax_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @umax_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -465,7 +465,7 @@ define <4 x i32> @umax_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @umax_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -511,7 +511,7 @@ define <8 x i8> @smin_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @smin_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -550,7 +550,7 @@ define <16 x i8> @smin_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @smin_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -589,7 +589,7 @@ define <4 x i16> @smin_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @smin_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -628,7 +628,7 @@ define <8 x i16> @smin_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @smin_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -667,7 +667,7 @@ define <2 x i32> @smin_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @smin_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -706,7 +706,7 @@ define <4 x i32> @smin_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @smin_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -752,7 +752,7 @@ define <8 x i8> @umin_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @umin_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -791,7 +791,7 @@ define <16 x i8> @umin_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @umin_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -830,7 +830,7 @@ define <4 x i16> @umin_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @umin_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -869,7 +869,7 @@ define <8 x i16> @umin_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @umin_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -908,7 +908,7 @@ define <2 x i32> @umin_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @umin_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -947,7 +947,7 @@ define <4 x i32> @umin_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @umin_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -994,7 +994,7 @@ define <8 x i8> @smaxp_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @smaxp_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1035,7 +1035,7 @@ define <16 x i8> @smaxp_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @smaxp_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1076,7 +1076,7 @@ define <4 x i16> @smaxp_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @smaxp_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1117,7 +1117,7 @@ define <8 x i16> @smaxp_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @smaxp_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1158,7 +1158,7 @@ define <2 x i32> @smaxp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @smaxp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1199,7 +1199,7 @@ define <4 x i32> @smaxp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @smaxp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1247,7 +1247,7 @@ define <8 x i8> @umaxp_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @umaxp_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1288,7 +1288,7 @@ define <16 x i8> @umaxp_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @umaxp_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1329,7 +1329,7 @@ define <4 x i16> @umaxp_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @umaxp_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1370,7 +1370,7 @@ define <8 x i16> @umaxp_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @umaxp_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1411,7 +1411,7 @@ define <2 x i32> @umaxp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @umaxp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1452,7 +1452,7 @@ define <4 x i32> @umaxp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @umaxp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1501,7 +1501,7 @@ define <8 x i8> @sminp_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @sminp_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1542,7 +1542,7 @@ define <16 x i8> @sminp_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @sminp_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1583,7 +1583,7 @@ define <4 x i16> @sminp_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @sminp_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1624,7 +1624,7 @@ define <8 x i16> @sminp_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @sminp_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1665,7 +1665,7 @@ define <2 x i32> @sminp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @sminp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1706,7 +1706,7 @@ define <4 x i32> @sminp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @sminp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1754,7 +1754,7 @@ define <8 x i8> @uminp_8b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i8> @uminp_8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1795,7 +1795,7 @@ define <16 x i8> @uminp_16b(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @uminp_16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1836,7 +1836,7 @@ define <4 x i16> @uminp_4h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i16> @uminp_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1877,7 +1877,7 @@ define <8 x i16> @uminp_8h(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @uminp_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1918,7 +1918,7 @@ define <2 x i32> @uminp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x i32> @uminp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1959,7 +1959,7 @@ define <4 x i32> @uminp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @uminp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2007,7 +2007,7 @@ define <2 x float> @fmax_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x float> @fmax_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2046,7 +2046,7 @@ define <4 x float> @fmax_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x float> @fmax_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2085,7 +2085,7 @@ define <2 x double> @fmax_2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x double> @fmax_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2128,7 +2128,7 @@ define <2 x float> @fmaxp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x float> @fmaxp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2169,7 +2169,7 @@ define <4 x float> @fmaxp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x float> @fmaxp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2210,7 +2210,7 @@ define <2 x double> @fmaxp_2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x double> @fmaxp_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2255,7 +2255,7 @@ define <2 x float> @fmin_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x float> @fmin_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2294,7 +2294,7 @@ define <4 x float> @fmin_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x float> @fmin_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2333,7 +2333,7 @@ define <2 x double> @fmin_2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x double> @fmin_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2376,7 +2376,7 @@ define <2 x float> @fminp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x float> @fminp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2417,7 +2417,7 @@ define <4 x float> @fminp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x float> @fminp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2458,7 +2458,7 @@ define <2 x double> @fminp_2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x double> @fminp_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2503,7 +2503,7 @@ define <2 x float> @fminnmp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x float> @fminnmp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2544,7 +2544,7 @@ define <4 x float> @fminnmp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x float> @fminnmp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2585,7 +2585,7 @@ define <2 x double> @fminnmp_2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x double> @fminnmp_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2630,7 +2630,7 @@ define <2 x float> @fmaxnmp_2s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x float> @fmaxnmp_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2671,7 +2671,7 @@ define <4 x float> @fmaxnmp_4s(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <4 x float> @fmaxnmp_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2712,7 +2712,7 @@ define <2 x double> @fmaxnmp_2d(ptr %A, ptr %B) nounwind #0 {
; CHECK-LABEL: define <2 x double> @fmaxnmp_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -2754,3 +2754,6 @@ declare <4 x float> @llvm.aarch64.neon.fmaxnmp.v4f32(<4 x float>, <4 x float>) n
declare <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double>, <2 x double>) nounwind readnone
attributes #0 = { sanitize_memory }
+;.
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
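The three lines added at the end of this file are the standard update_test_checks footer for metadata: the ;. markers delimit the block, and the [[PROF1]] line pins down the !prof metadata that every br i1 [[_MSCMP]] above refers to. The weights mark the warning path as roughly a 1-in-2^20 event, so each shadow check compiles to a strongly biased branch. A tiny sketch of the shape being matched (names assumed, not taken from the tests):

; --- illustrative sketch, not part of the diff ---
define void @shadow_check(i64 %shadow) {
  %_mscmp = icmp ne i64 %shadow, 0
  br i1 %_mscmp, label %warn, label %ok, !prof !1
warn:                                     ; cold: the shadow was poisoned
  br label %ok
ok:
  ret void
}

!1 = !{!"branch_weights", i32 1, i32 1048575}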
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll
index 8e9110f..ced0138 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll
@@ -51,7 +51,7 @@ define <2 x i32> @xtn2s(<2 x i64> %A) nounwind #0 {
define <16 x i8> @xtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @xtn2_16b(
; CHECK-SAME: <8 x i8> [[RET:%.*]], <8 x i16> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i16> [[TMP1]] to <8 x i8>
@@ -69,7 +69,7 @@ define <16 x i8> @xtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 {
define <8 x i16> @xtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @xtn2_8h(
; CHECK-SAME: <4 x i16> [[RET:%.*]], <4 x i32> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i16>
@@ -87,7 +87,7 @@ define <8 x i16> @xtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 {
define <4 x i32> @xtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @xtn2_4s(
; CHECK-SAME: <2 x i32> [[RET:%.*]], <2 x i64> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i32>
@@ -150,7 +150,7 @@ define <2 x i32> @sqxtn2s(<2 x i64> %A) nounwind #0 {
define <16 x i8> @sqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @sqxtn2_16b(
; CHECK-SAME: <8 x i8> [[RET:%.*]], <8 x i16> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i16> [[TMP1]], zeroinitializer
@@ -169,7 +169,7 @@ define <16 x i8> @sqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 {
define <8 x i16> @sqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @sqxtn2_8h(
; CHECK-SAME: <4 x i16> [[RET:%.*]], <4 x i32> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer
@@ -188,7 +188,7 @@ define <8 x i16> @sqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 {
define <4 x i32> @sqxtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @sqxtn2_4s(
; CHECK-SAME: <2 x i32> [[RET:%.*]], <2 x i64> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
@@ -256,7 +256,7 @@ define <2 x i32> @uqxtn2s(<2 x i64> %A) nounwind #0 {
define <16 x i8> @uqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @uqxtn2_16b(
; CHECK-SAME: <8 x i8> [[RET:%.*]], <8 x i16> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i16> [[TMP1]], zeroinitializer
@@ -275,7 +275,7 @@ define <16 x i8> @uqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 {
define <8 x i16> @uqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @uqxtn2_8h(
; CHECK-SAME: <4 x i16> [[RET:%.*]], <4 x i32> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer
@@ -294,7 +294,7 @@ define <8 x i16> @uqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 {
define <4 x i32> @uqxtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @uqxtn2_4s(
; CHECK-SAME: <2 x i32> [[RET:%.*]], <2 x i64> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
@@ -362,7 +362,7 @@ define <2 x i32> @sqxtun2s(<2 x i64> %A) nounwind #0 {
define <16 x i8> @sqxtun2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @sqxtun2_16b(
; CHECK-SAME: <8 x i8> [[RET:%.*]], <8 x i16> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i16> [[TMP1]], zeroinitializer
@@ -381,7 +381,7 @@ define <16 x i8> @sqxtun2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 {
define <8 x i16> @sqxtun2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 {
; CHECK-LABEL: define <8 x i16> @sqxtun2_8h(
; CHECK-SAME: <4 x i16> [[RET:%.*]], <4 x i32> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer
@@ -400,7 +400,7 @@ define <8 x i16> @sqxtun2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 {
define <4 x i32> @sqxtun2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @sqxtun2_4s(
; CHECK-SAME: <2 x i32> [[RET:%.*]], <2 x i64> [[A:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
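Besides the TLS-address respelling, the vmovn hunks show both shadow-propagation shapes these narrowing tests check for: the plain xtn cases push the wide shadow through a matching trunc ([[_MSPROP]]), while the saturating sqxtn/uqxtn/sqxtun cases first fold it through an or with zeroinitializer ([[_MSPROP1]], a no-op union of shadows as emitted here). A minimal sketch of the first shape, with an invented function name:

; --- illustrative sketch, not part of the diff ---
; Unknown bits in the wide lanes stay unknown in the narrowed lanes.
define <8 x i8> @narrowed_shadow(<8 x i16> %wide_shadow) {
  %narrow = trunc <8 x i16> %wide_shadow to <8 x i8>
  ret <8 x i8> %narrow
}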
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll
index 38d6669..e9bb743 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll
@@ -13,7 +13,7 @@ define <8 x i16> @smull8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @smull8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]]
@@ -54,7 +54,7 @@ define <4 x i32> @smull4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @smull4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -95,7 +95,7 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @smull2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -140,7 +140,7 @@ define <8 x i16> @umull8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @umull8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -181,7 +181,7 @@ define <4 x i32> @umull4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @umull4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -222,7 +222,7 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @umull2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -267,7 +267,7 @@ define <4 x i32> @sqdmull4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmull4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -315,7 +315,7 @@ define <2 x i64> @sqdmull2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmull2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -363,7 +363,7 @@ define <4 x i32> @sqdmull2_4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmull2_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -417,7 +417,7 @@ define <2 x i64> @sqdmull2_2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmull2_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -475,7 +475,7 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @pmull8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -518,7 +518,7 @@ define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @sqdmulh_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -557,7 +557,7 @@ define <8 x i16> @sqdmulh_8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqdmulh_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -596,7 +596,7 @@ define <2 x i32> @sqdmulh_2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @sqdmulh_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -635,7 +635,7 @@ define <4 x i32> @sqdmulh_4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmulh_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -674,7 +674,7 @@ define i32 @sqdmulh_1s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqdmulh_1s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -719,7 +719,7 @@ define <4 x i16> @sqrdmulh_4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @sqrdmulh_4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -758,7 +758,7 @@ define <8 x i16> @sqrdmulh_8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqrdmulh_8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -797,7 +797,7 @@ define <2 x i32> @sqrdmulh_2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @sqrdmulh_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -836,7 +836,7 @@ define <4 x i32> @sqrdmulh_4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqrdmulh_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -875,7 +875,7 @@ define i32 @sqrdmulh_1s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqrdmulh_1s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -920,7 +920,7 @@ define <2 x float> @fmulx_2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x float> @fmulx_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -960,7 +960,7 @@ define <4 x float> @fmulx_4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x float> @fmulx_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1000,7 +1000,7 @@ define <2 x double> @fmulx_2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x double> @fmulx_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
@@ -1044,8 +1044,8 @@ define <4 x i32> @smlal4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @smlal4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1101,8 +1101,8 @@ define <2 x i64> @smlal2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @smlal2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1208,8 +1208,8 @@ define <4 x i32> @smlsl4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @smlsl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1265,8 +1265,8 @@ define <2 x i64> @smlsl2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @smlsl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1377,8 +1377,8 @@ define <4 x i32> @sqdmlal4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmlal4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1441,8 +1441,8 @@ define <2 x i64> @sqdmlal2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmlal2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1505,8 +1505,8 @@ define <4 x i32> @sqdmlal2_4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmlal2_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1575,8 +1575,8 @@ define <2 x i64> @sqdmlal2_2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmlal2_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1645,8 +1645,8 @@ define <4 x i32> @sqdmlsl4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmlsl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1709,8 +1709,8 @@ define <2 x i64> @sqdmlsl2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmlsl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1773,8 +1773,8 @@ define <4 x i32> @sqdmlsl2_4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmlsl2_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1843,8 +1843,8 @@ define <2 x i64> @sqdmlsl2_2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmlsl2_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1913,8 +1913,8 @@ define <4 x i32> @umlal4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @umlal4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1970,8 +1970,8 @@ define <2 x i64> @umlal2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @umlal2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2077,8 +2077,8 @@ define <4 x i32> @umlsl4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @umlsl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2134,8 +2134,8 @@ define <2 x i64> @umlsl2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @umlsl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2241,8 +2241,8 @@ define <2 x float> @fmla_2s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x float> @fmla_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2294,8 +2294,8 @@ define <4 x float> @fmla_4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x float> @fmla_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2347,8 +2347,8 @@ define <2 x double> @fmla_2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x double> @fmla_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2404,8 +2404,8 @@ define <2 x float> @fmls_2s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x float> @fmls_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2460,8 +2460,8 @@ define <4 x float> @fmls_4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x float> @fmls_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2516,8 +2516,8 @@ define <2 x double> @fmls_2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x double> @fmls_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2572,8 +2572,8 @@ define <2 x float> @fmls_commuted_neg_2s(ptr %A, ptr %B, ptr %C) nounwind saniti
; CHECK-LABEL: define <2 x float> @fmls_commuted_neg_2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2628,8 +2628,8 @@ define <4 x float> @fmls_commuted_neg_4s(ptr %A, ptr %B, ptr %C) nounwind saniti
; CHECK-LABEL: define <4 x float> @fmls_commuted_neg_4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2684,8 +2684,8 @@ define <2 x double> @fmls_commuted_neg_2d(ptr %A, ptr %B, ptr %C) nounwind sanit
; CHECK-LABEL: define <2 x double> @fmls_commuted_neg_2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -2969,7 +2969,7 @@ declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x
define <4 x i16> @mul_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @mul_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -2987,7 +2987,7 @@ define <4 x i16> @mul_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory {
define <8 x i16> @mul_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @mul_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -3005,7 +3005,7 @@ define <8 x i16> @mul_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_memory {
define <2 x i32> @mul_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @mul_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
@@ -3023,7 +3023,7 @@ define <2 x i32> @mul_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory {
define <4 x i32> @mul_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @mul_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3042,7 +3042,7 @@ define <2 x i64> @mul_2d(<2 x i64> %A, <2 x i64> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @mul_2d(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP1:%.*]] = mul <2 x i64> [[A]], [[B]]
@@ -3056,7 +3056,7 @@ define <2 x i64> @mul_2d(<2 x i64> %A, <2 x i64> %B) nounwind sanitize_memory {
define <2 x float> @fmul_lane_2s(<2 x float> %A, <2 x float> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x float> @fmul_lane_2s(
; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
@@ -3074,7 +3074,7 @@ define <2 x float> @fmul_lane_2s(<2 x float> %A, <2 x float> %B) nounwind saniti
define <4 x float> @fmul_lane_4s(<4 x float> %A, <4 x float> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x float> @fmul_lane_4s(
; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3092,7 +3092,7 @@ define <4 x float> @fmul_lane_4s(<4 x float> %A, <4 x float> %B) nounwind saniti
define <2 x double> @fmul_lane_2d(<2 x double> %A, <2 x double> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x double> @fmul_lane_2d(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <2 x i32> <i32 1, i32 1>
@@ -3110,7 +3110,7 @@ define <2 x double> @fmul_lane_2d(<2 x double> %A, <2 x double> %B) nounwind san
define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind sanitize_memory {
; CHECK-LABEL: define float @fmul_lane_s(
; CHECK-SAME: float [[A:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
@@ -3128,7 +3128,7 @@ define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind sanitize_memory {
define double @fmul_lane_d(double %A, <2 x double> %vec) nounwind sanitize_memory {
; CHECK-LABEL: define double @fmul_lane_d(
; CHECK-SAME: double [[A:%.*]], <2 x double> [[VEC:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
@@ -3148,7 +3148,7 @@ define double @fmul_lane_d(double %A, <2 x double> %vec) nounwind sanitize_memor
define <2 x float> @fmulx_lane_2s(<2 x float> %A, <2 x float> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x float> @fmulx_lane_2s(
; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
@@ -3167,7 +3167,7 @@ define <2 x float> @fmulx_lane_2s(<2 x float> %A, <2 x float> %B) nounwind sanit
define <4 x float> @fmulx_lane_4s(<4 x float> %A, <4 x float> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x float> @fmulx_lane_4s(
; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3186,7 +3186,7 @@ define <4 x float> @fmulx_lane_4s(<4 x float> %A, <4 x float> %B) nounwind sanit
define <2 x double> @fmulx_lane_2d(<2 x double> %A, <2 x double> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x double> @fmulx_lane_2d(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <2 x i32> <i32 1, i32 1>
@@ -3205,7 +3205,7 @@ define <2 x double> @fmulx_lane_2d(<2 x double> %A, <2 x double> %B) nounwind sa
define <4 x i16> @sqdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @sqdmulh_lane_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3223,7 +3223,7 @@ define <4 x i16> @sqdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_
define <8 x i16> @sqdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqdmulh_lane_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -3241,7 +3241,7 @@ define <8 x i16> @sqdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_
define <2 x i32> @sqdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @sqdmulh_lane_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
@@ -3259,7 +3259,7 @@ define <2 x i32> @sqdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_
define <4 x i32> @sqdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmulh_lane_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3277,7 +3277,7 @@ define <4 x i32> @sqdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize_
define i32 @sqdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqdmulh_lane_1s(
; CHECK-SAME: i32 [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 1
@@ -3295,7 +3295,7 @@ define i32 @sqdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind sanitize_memory {
define <4 x i16> @sqrdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @sqrdmulh_lane_4h(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3313,7 +3313,7 @@ define <4 x i16> @sqrdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize
define <8 x i16> @sqrdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqrdmulh_lane_8h(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -3331,7 +3331,7 @@ define <8 x i16> @sqrdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize
define <2 x i32> @sqrdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @sqrdmulh_lane_2s(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
@@ -3349,7 +3349,7 @@ define <2 x i32> @sqrdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize
define <4 x i32> @sqrdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqrdmulh_lane_4s(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3367,7 +3367,7 @@ define <4 x i32> @sqrdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize
define i32 @sqrdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqrdmulh_lane_1s(
; CHECK-SAME: i32 [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 1
@@ -3385,7 +3385,7 @@ define i32 @sqrdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind sanitize_memory {
define <4 x i32> @sqdmull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmull_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3412,7 +3412,7 @@ define <4 x i32> @sqdmull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_
define <2 x i64> @sqdmull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmull_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
@@ -3440,7 +3440,7 @@ define <4 x i32> @sqdmull2_lane_4s(<8 x i16> %A, <8 x i16> %B) nounwind sanitize
; CHECK-LABEL: define <4 x i32> @sqdmull2_lane_4s(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP5]], <8 x i16> splat (i16 -1), <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -3470,7 +3470,7 @@ define <2 x i64> @sqdmull2_lane_2d(<4 x i32> %A, <4 x i32> %B) nounwind sanitize
; CHECK-LABEL: define <2 x i64> @sqdmull2_lane_2d(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> splat (i32 -1), <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -3499,7 +3499,7 @@ define <2 x i64> @sqdmull2_lane_2d(<4 x i32> %A, <4 x i32> %B) nounwind sanitize
define <4 x i32> @umull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @umull_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3519,7 +3519,7 @@ define <4 x i32> @umull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_me
define <2 x i64> @umull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @umull_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
@@ -3539,7 +3539,7 @@ define <2 x i64> @umull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_me
define <4 x i32> @smull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @smull_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3559,7 +3559,7 @@ define <4 x i32> @smull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_me
define <2 x i64> @smull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @smull_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
@@ -3579,9 +3579,9 @@ define <2 x i64> @smull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_me
define <4 x i32> @smlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @smlal_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3603,9 +3603,9 @@ define <4 x i32> @smlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi
define <2 x i64> @smlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @smlal_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> <i32 1, i32 1>
@@ -3627,9 +3627,9 @@ define <2 x i64> @smlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi
define <4 x i32> @sqdmlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmlal_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3658,9 +3658,9 @@ define <4 x i32> @sqdmlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) noun
define <2 x i64> @sqdmlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmlal_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> <i32 1, i32 1>
@@ -3690,8 +3690,8 @@ define <4 x i32> @sqdmlal2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nou
; CHECK-LABEL: define <4 x i32> @sqdmlal2_lane_4s(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP7]], <8 x i16> splat (i16 -1), <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -3724,8 +3724,8 @@ define <2 x i64> @sqdmlal2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nou
; CHECK-LABEL: define <2 x i64> @sqdmlal2_lane_2d(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> splat (i32 -1), <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -3757,8 +3757,8 @@ define <2 x i64> @sqdmlal2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nou
define i32 @sqdmlal_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqdmlal_lane_1s(
; CHECK-SAME: i32 [[A:%.*]], i16 [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> splat (i16 -1), i16 [[TMP1]], i32 0
@@ -3794,8 +3794,8 @@ declare i32 @llvm.aarch64.neon.sqadd.i32(i32, i32)
define i32 @sqdmlsl_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqdmlsl_lane_1s(
; CHECK-SAME: i32 [[A:%.*]], i16 [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> splat (i16 -1), i16 [[TMP1]], i32 0
@@ -3831,8 +3831,8 @@ declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32)
define i32 @sqadd_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqadd_lane1_sqdmull4s(
; CHECK-SAME: i32 [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP1]] to i64
@@ -3861,8 +3861,8 @@ define i32 @sqadd_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind s
define i32 @sqsub_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqsub_lane1_sqdmull4s(
; CHECK-SAME: i32 [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP1]] to i64
@@ -3891,8 +3891,8 @@ define i32 @sqsub_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind s
define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @sqdmlal_lane_1d(
; CHECK-SAME: i64 [[A:%.*]], i32 [[B:%.*]], <2 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
@@ -3922,8 +3922,8 @@ declare i64 @llvm.aarch64.neon.sqadd.i64(i64, i64)
define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @sqdmlsl_lane_1d(
; CHECK-SAME: i64 [[A:%.*]], i32 [[B:%.*]], <2 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
@@ -3953,9 +3953,9 @@ declare i64 @llvm.aarch64.neon.sqsub.i64(i64, i64)
define <4 x i32> @umlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @umlal_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -3977,9 +3977,9 @@ define <4 x i32> @umlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi
define <2 x i64> @umlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @umlal_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> <i32 1, i32 1>
@@ -4002,9 +4002,9 @@ define <2 x i64> @umlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi
define <4 x i32> @smlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @smlsl_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -4026,9 +4026,9 @@ define <4 x i32> @smlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi
define <2 x i64> @smlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @smlsl_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> <i32 1, i32 1>
@@ -4050,9 +4050,9 @@ define <2 x i64> @smlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi
define <4 x i32> @sqdmlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqdmlsl_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -4081,9 +4081,9 @@ define <4 x i32> @sqdmlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) noun
define <2 x i64> @sqdmlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqdmlsl_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> <i32 1, i32 1>
@@ -4113,8 +4113,8 @@ define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nou
; CHECK-LABEL: define <4 x i32> @sqdmlsl2_lane_4s(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP7]], <8 x i16> splat (i16 -1), <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -4147,8 +4147,8 @@ define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nou
; CHECK-LABEL: define <2 x i64> @sqdmlsl2_lane_2d(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> splat (i32 -1), <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -4180,9 +4180,9 @@ define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nou
define <4 x i32> @umlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @umlsl_lane_4s(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -4204,9 +4204,9 @@ define <4 x i32> @umlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi
define <2 x i64> @umlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @umlsl_lane_2d(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> <i32 1, i32 1>
@@ -4230,7 +4230,7 @@ define float @fmulxs(float %a, float %b) nounwind sanitize_memory {
; CHECK-LABEL: define float @fmulxs(
; CHECK-SAME: float [[A:%.*]], float [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -4246,7 +4246,7 @@ define double @fmulxd(double %a, double %b) nounwind sanitize_memory {
; CHECK-LABEL: define double @fmulxd(
; CHECK-SAME: double [[A:%.*]], double [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
@@ -4261,7 +4261,7 @@ define double @fmulxd(double %a, double %b) nounwind sanitize_memory {
define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind sanitize_memory {
; CHECK-LABEL: define float @fmulxs_lane(
; CHECK-SAME: float [[A:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
@@ -4280,7 +4280,7 @@ define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind sanitize_memory {
define double @fmulxd_lane(double %a, <2 x double> %vec) nounwind sanitize_memory {
; CHECK-LABEL: define double @fmulxd_lane(
; CHECK-SAME: double [[A:%.*]], <2 x double> [[VEC:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
@@ -4304,7 +4304,7 @@ define <8 x i16> @smull2_8h_simple(<16 x i8> %a, <16 x i8> %b) nounwind sanitize
; CHECK-LABEL: define <8 x i16> @smull2_8h_simple(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP4]], <16 x i8> splat (i8 -1), <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -4327,7 +4327,7 @@ define <8 x i16> @foo0(<16 x i8> %a, <16 x i8> %b) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @foo0(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <16 x i8> [[A]] to <2 x i64>
@@ -4362,7 +4362,7 @@ define <4 x i32> @foo1(<8 x i16> %a, <8 x i16> %b) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @foo1(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP8]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <8 x i16> [[A]] to <2 x i64>
@@ -4397,7 +4397,7 @@ define <2 x i64> @foo2(<4 x i32> %a, <4 x i32> %b) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @foo2(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP8]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <4 x i32> [[A]] to <2 x i64>
@@ -4432,7 +4432,7 @@ define <8 x i16> @foo3(<16 x i8> %a, <16 x i8> %b) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @foo3(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <16 x i8> [[A]] to <2 x i64>
@@ -4467,7 +4467,7 @@ define <4 x i32> @foo4(<8 x i16> %a, <8 x i16> %b) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @foo4(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP8]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <8 x i16> [[A]] to <2 x i64>
@@ -4502,7 +4502,7 @@ define <2 x i64> @foo5(<4 x i32> %a, <4 x i32> %b) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @foo5(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP8]] to <2 x i64>
; CHECK-NEXT: [[TMP:%.*]] = bitcast <4 x i32> [[A]] to <2 x i64>
@@ -4713,8 +4713,8 @@ entry:
define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @bar0(
; CHECK-SAME: <8 x i16> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP9]] to <2 x i64>
@@ -4752,8 +4752,8 @@ define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind saniti
define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @bar1(
; CHECK-SAME: <4 x i32> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP9]] to <2 x i64>
@@ -4791,8 +4791,8 @@ define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind saniti
define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @bar2(
; CHECK-SAME: <2 x i64> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP9]] to <2 x i64>
@@ -4830,8 +4830,8 @@ define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind saniti
define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @bar3(
; CHECK-SAME: <8 x i16> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP9]] to <2 x i64>
@@ -4869,8 +4869,8 @@ define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind saniti
define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @bar4(
; CHECK-SAME: <4 x i32> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP9]] to <2 x i64>
@@ -4908,8 +4908,8 @@ define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind saniti
define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @bar5(
; CHECK-SAME: <2 x i64> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP9]] to <2 x i64>
@@ -4947,8 +4947,8 @@ define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind saniti
define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @mlal2_1(
; CHECK-SAME: <4 x i32> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP9]], <4 x i16> splat (i16 -1), <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -4989,8 +4989,8 @@ define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind san
define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @mlal2_2(
; CHECK-SAME: <2 x i64> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> splat (i32 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -5031,8 +5031,8 @@ define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind san
define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @mlal2_4(
; CHECK-SAME: <4 x i32> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP9]], <4 x i16> splat (i16 -1), <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -5073,8 +5073,8 @@ define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind san
define <2 x i64> @mlal2_5(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @mlal2_5(
; CHECK-SAME: <2 x i64> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i32> [[C:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> splat (i32 -1), <4 x i32> zeroinitializer
@@ -5960,7 +5960,7 @@ define <1 x double> @test_fmul_v1f64(<1 x double> %L, <1 x double> %R) nounwind
; CHECK-LABEL: define <1 x double> @test_fmul_v1f64(
; CHECK-SAME: <1 x double> [[L:%.*]], <1 x double> [[R:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <1 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[PROD:%.*]] = fmul <1 x double> [[L]], [[R]]
@@ -5975,7 +5975,7 @@ define <1 x double> @test_fdiv_v1f64(<1 x double> %L, <1 x double> %R) nounwind
; CHECK-LABEL: define <1 x double> @test_fdiv_v1f64(
; CHECK-SAME: <1 x double> [[L:%.*]], <1 x double> [[R:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <1 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[PROD:%.*]] = fdiv <1 x double> [[L]], [[R]]
@@ -5990,8 +5990,8 @@ define i32 @sqdmlal_s(i16 %A, i16 %B, i32 %C) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqdmlal_s(
; CHECK-SAME: i16 [[A:%.*]], i16 [[B:%.*]], i32 [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> splat (i16 -1), i16 [[TMP6]], i64 0
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> undef, i16 [[A]], i64 0
@@ -6026,8 +6026,8 @@ define i64 @sqdmlal_d(i32 %A, i32 %B, i64 %C) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @sqdmlal_d(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0
@@ -6052,8 +6052,8 @@ define i32 @sqdmlsl_s(i16 %A, i16 %B, i32 %C) nounwind sanitize_memory {
; CHECK-LABEL: define i32 @sqdmlsl_s(
; CHECK-SAME: i16 [[A:%.*]], i16 [[B:%.*]], i32 [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> splat (i16 -1), i16 [[TMP6]], i64 0
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> undef, i16 [[A]], i64 0
@@ -6088,8 +6088,8 @@ define i64 @sqdmlsl_d(i32 %A, i32 %B, i64 %C) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @sqdmlsl_d(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0
@@ -6114,7 +6114,7 @@ define <16 x i8> @test_pmull_64(i64 %l, i64 %r) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @test_pmull_64(
; CHECK-SAME: i64 [[L:%.*]], i64 [[R:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
@@ -6132,7 +6132,7 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind saniti
; CHECK-LABEL: define <16 x i8> @test_pmull_high_64(
; CHECK-SAME: <2 x i64> [[L:%.*]], <2 x i64> [[R:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
; CHECK-NEXT: [[L_HI:%.*]] = extractelement <2 x i64> [[L]], i32 1
@@ -6158,7 +6158,7 @@ define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind saniti
; CHECK-LABEL: define <1 x i64> @test_mul_v1i64(
; CHECK-SAME: <1 x i64> [[LHS:%.*]], <1 x i64> [[RHS:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <1 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[PROD:%.*]] = mul <1 x i64> [[LHS]], [[RHS]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll
index 7fa9b41..42d2351 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll
@@ -11,7 +11,7 @@ define <8 x i8> @sqshl8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @sqshl8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
@@ -55,7 +55,7 @@ define <4 x i16> @sqshl4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @sqshl4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -99,7 +99,7 @@ define <2 x i32> @sqshl2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @sqshl2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -143,7 +143,7 @@ define <1 x i64> @sqshl1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @sqshl1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -214,7 +214,7 @@ define i64 @sqshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @sqshl_scalar(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -283,7 +283,7 @@ define <8 x i8> @uqshl8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @uqshl8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -327,7 +327,7 @@ define <4 x i16> @uqshl4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @uqshl4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -371,7 +371,7 @@ define <2 x i32> @uqshl2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @uqshl2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -415,7 +415,7 @@ define <16 x i8> @sqshl16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @sqshl16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -460,7 +460,7 @@ define <8 x i16> @sqshl8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqshl8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -505,7 +505,7 @@ define <4 x i32> @sqshl4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqshl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -550,7 +550,7 @@ define <2 x i64> @sqshl2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqshl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -595,7 +595,7 @@ define <16 x i8> @uqshl16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @uqshl16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -640,7 +640,7 @@ define <8 x i16> @uqshl8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @uqshl8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -685,7 +685,7 @@ define <4 x i32> @uqshl4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @uqshl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -730,7 +730,7 @@ define <2 x i64> @uqshl2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @uqshl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -775,7 +775,7 @@ define <1 x i64> @uqshl1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @uqshl1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -846,7 +846,7 @@ define i64 @uqshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @uqshl_scalar(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -938,7 +938,7 @@ define <8 x i8> @srshl8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @srshl8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -982,7 +982,7 @@ define <4 x i16> @srshl4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @srshl4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1026,7 +1026,7 @@ define <2 x i32> @srshl2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @srshl2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1070,7 +1070,7 @@ define <1 x i64> @srshl1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @srshl1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1141,7 +1141,7 @@ define i64 @srshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @srshl_scalar(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1210,7 +1210,7 @@ define <8 x i8> @urshl8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @urshl8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1254,7 +1254,7 @@ define <4 x i16> @urshl4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @urshl4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1298,7 +1298,7 @@ define <2 x i32> @urshl2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @urshl2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1342,7 +1342,7 @@ define <1 x i64> @urshl1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @urshl1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1413,7 +1413,7 @@ define i64 @urshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @urshl_scalar(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1482,7 +1482,7 @@ define <16 x i8> @srshl16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @srshl16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1527,7 +1527,7 @@ define <8 x i16> @srshl8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @srshl8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1572,7 +1572,7 @@ define <4 x i32> @srshl4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @srshl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1617,7 +1617,7 @@ define <2 x i64> @srshl2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @srshl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1662,7 +1662,7 @@ define <16 x i8> @urshl16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @urshl16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1707,7 +1707,7 @@ define <8 x i16> @urshl8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @urshl8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1752,7 +1752,7 @@ define <4 x i32> @urshl4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @urshl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1797,7 +1797,7 @@ define <2 x i64> @urshl2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @urshl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1864,7 +1864,7 @@ define <8 x i8> @sqrshl8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @sqrshl8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1908,7 +1908,7 @@ define <4 x i16> @sqrshl4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @sqrshl4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1952,7 +1952,7 @@ define <2 x i32> @sqrshl2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @sqrshl2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1996,7 +1996,7 @@ define <8 x i8> @uqrshl8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @uqrshl8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2040,7 +2040,7 @@ define <4 x i16> @uqrshl4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @uqrshl4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2084,7 +2084,7 @@ define <2 x i32> @uqrshl2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @uqrshl2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2128,7 +2128,7 @@ define <16 x i8> @sqrshl16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @sqrshl16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2173,7 +2173,7 @@ define <8 x i16> @sqrshl8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqrshl8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2218,7 +2218,7 @@ define <4 x i32> @sqrshl4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqrshl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2263,7 +2263,7 @@ define <2 x i64> @sqrshl2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sqrshl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2308,7 +2308,7 @@ define <1 x i64> @sqrshl1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @sqrshl1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2379,7 +2379,7 @@ define i64 @sqrshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @sqrshl_scalar(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2448,7 +2448,7 @@ define <16 x i8> @uqrshl16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @uqrshl16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2493,7 +2493,7 @@ define <8 x i16> @uqrshl8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @uqrshl8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2538,7 +2538,7 @@ define <4 x i32> @uqrshl4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @uqrshl4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2583,7 +2583,7 @@ define <2 x i64> @uqrshl2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @uqrshl2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2628,7 +2628,7 @@ define <1 x i64> @uqrshl1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @uqrshl1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -2699,7 +2699,7 @@ define i64 @uqrshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @uqrshl_scalar(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3639,7 +3639,7 @@ define <16 x i8> @rshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @rshrn16b(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3682,7 +3682,7 @@ define <8 x i16> @rshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @rshrn8h(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3725,7 +3725,7 @@ define <4 x i32> @rshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @rshrn4s(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3862,7 +3862,7 @@ define <16 x i8> @shrn16b(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @shrn16b(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3908,7 +3908,7 @@ define <8 x i16> @shrn8h(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @shrn8h(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -3954,7 +3954,7 @@ define <4 x i32> @shrn4s(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @shrn4s(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4101,7 +4101,7 @@ define <16 x i8> @sqshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @sqshrn16b(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4144,7 +4144,7 @@ define <8 x i16> @sqshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqshrn8h(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4187,7 +4187,7 @@ define <4 x i32> @sqshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqshrn4s(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4331,7 +4331,7 @@ define <16 x i8> @sqshrun16b(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @sqshrun16b(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4374,7 +4374,7 @@ define <8 x i16> @sqshrun8h(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqshrun8h(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4417,7 +4417,7 @@ define <4 x i32> @sqshrun4s(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqshrun4s(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4561,7 +4561,7 @@ define <16 x i8> @sqrshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @sqrshrn16b(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4604,7 +4604,7 @@ define <8 x i16> @sqrshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqrshrn8h(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4647,7 +4647,7 @@ define <4 x i32> @sqrshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqrshrn4s(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4791,7 +4791,7 @@ define <16 x i8> @sqrshrun16b(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @sqrshrun16b(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4834,7 +4834,7 @@ define <8 x i16> @sqrshrun8h(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sqrshrun8h(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -4877,7 +4877,7 @@ define <4 x i32> @sqrshrun4s(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sqrshrun4s(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -5021,7 +5021,7 @@ define <16 x i8> @uqrshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @uqrshrn16b(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -5064,7 +5064,7 @@ define <8 x i16> @uqrshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @uqrshrn8h(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -5107,7 +5107,7 @@ define <4 x i32> @uqrshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @uqrshrn4s(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -5251,7 +5251,7 @@ define <16 x i8> @uqshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @uqshrn16b(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -5294,7 +5294,7 @@ define <8 x i16> @uqshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @uqshrn8h(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -5337,7 +5337,7 @@ define <4 x i32> @uqshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @uqshrn4s(
; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -6845,7 +6845,7 @@ define <8 x i8> @ursra8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @ursra8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -6888,7 +6888,7 @@ define <4 x i16> @ursra4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @ursra4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -6931,7 +6931,7 @@ define <2 x i32> @ursra2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @ursra2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -6974,7 +6974,7 @@ define <16 x i8> @ursra16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @ursra16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7017,7 +7017,7 @@ define <8 x i16> @ursra8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @ursra8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7060,7 +7060,7 @@ define <4 x i32> @ursra4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @ursra4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7103,7 +7103,7 @@ define <2 x i64> @ursra2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @ursra2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7146,7 +7146,7 @@ define <1 x i64> @ursra1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @ursra1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7189,7 +7189,7 @@ define i64 @ursra_scalar(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @ursra_scalar(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7232,7 +7232,7 @@ define <8 x i8> @srsra8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @srsra8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7275,7 +7275,7 @@ define <4 x i16> @srsra4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @srsra4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7318,7 +7318,7 @@ define <2 x i32> @srsra2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @srsra2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7361,7 +7361,7 @@ define <16 x i8> @srsra16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @srsra16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7404,7 +7404,7 @@ define <8 x i16> @srsra8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @srsra8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7447,7 +7447,7 @@ define <4 x i32> @srsra4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @srsra4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7490,7 +7490,7 @@ define <2 x i64> @srsra2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @srsra2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7533,7 +7533,7 @@ define <1 x i64> @srsra1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @srsra1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7576,7 +7576,7 @@ define i64 @srsra_scalar(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define i64 @srsra_scalar(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7619,7 +7619,7 @@ define <8 x i8> @usra8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @usra8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7662,7 +7662,7 @@ define <4 x i16> @usra4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @usra4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7705,7 +7705,7 @@ define <2 x i32> @usra2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @usra2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7748,7 +7748,7 @@ define <16 x i8> @usra16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @usra16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7791,7 +7791,7 @@ define <8 x i16> @usra8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @usra8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7834,7 +7834,7 @@ define <4 x i32> @usra4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @usra4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7877,7 +7877,7 @@ define <2 x i64> @usra2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @usra2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7920,7 +7920,7 @@ define <1 x i64> @usra1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @usra1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -7963,7 +7963,7 @@ define <8 x i8> @ssra8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @ssra8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8006,7 +8006,7 @@ define <4 x i16> @ssra4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @ssra4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8049,7 +8049,7 @@ define <2 x i32> @ssra2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @ssra2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8092,7 +8092,7 @@ define <16 x i8> @ssra16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @ssra16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8135,7 +8135,7 @@ define <8 x i16> @ssra8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @ssra8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8178,7 +8178,7 @@ define <4 x i32> @ssra4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @ssra4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8221,7 +8221,7 @@ define <2 x i64> @ssra2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @ssra2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8264,7 +8264,7 @@ define <8 x i8> @shr_orr8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @shr_orr8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8313,7 +8313,7 @@ define <4 x i16> @shr_orr4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @shr_orr4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8362,7 +8362,7 @@ define <2 x i32> @shr_orr2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @shr_orr2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8411,7 +8411,7 @@ define <16 x i8> @shr_orr16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @shr_orr16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8460,7 +8460,7 @@ define <8 x i16> @shr_orr8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @shr_orr8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8509,7 +8509,7 @@ define <4 x i32> @shr_orr4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @shr_orr4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8558,7 +8558,7 @@ define <2 x i64> @shr_orr2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @shr_orr2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8607,7 +8607,7 @@ define <8 x i8> @shl_orr8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @shl_orr8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8656,7 +8656,7 @@ define <4 x i16> @shl_orr4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @shl_orr4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8705,7 +8705,7 @@ define <2 x i32> @shl_orr2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @shl_orr2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8754,7 +8754,7 @@ define <16 x i8> @shl_orr16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @shl_orr16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8803,7 +8803,7 @@ define <8 x i16> @shl_orr8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @shl_orr8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8852,7 +8852,7 @@ define <4 x i32> @shl_orr4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @shl_orr4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8901,7 +8901,7 @@ define <2 x i64> @shl_orr2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @shl_orr2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -8989,7 +8989,7 @@ define <8 x i8> @sli8b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i8> @sli8b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -9037,7 +9037,7 @@ define <4 x i16> @sli4h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i16> @sli4h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -9085,7 +9085,7 @@ define <2 x i32> @sli2s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i32> @sli2s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -9133,7 +9133,7 @@ define <1 x i64> @sli1d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <1 x i64> @sli1d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -9181,7 +9181,7 @@ define <16 x i8> @sli16b(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <16 x i8> @sli16b(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -9229,7 +9229,7 @@ define <8 x i16> @sli8h(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <8 x i16> @sli8h(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -9277,7 +9277,7 @@ define <4 x i32> @sli4s(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <4 x i32> @sli4s(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -9325,7 +9325,7 @@ define <2 x i64> @sli2d(ptr %A, ptr %B) nounwind sanitize_memory {
; CHECK-LABEL: define <2 x i64> @sli2d(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -9383,7 +9383,7 @@ define <1 x i64> @ashr_v1i64(<1 x i64> %a, <1 x i64> %b) sanitize_memory {
; CHECK-LABEL: define <1 x i64> @ashr_v1i64(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <1 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64>
@@ -9402,8 +9402,8 @@ define void @sqshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sanit
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 3>
@@ -9437,8 +9437,8 @@ define void @uqshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sanit
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 3>
@@ -9472,8 +9472,8 @@ define void @srshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sanit
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 3>
@@ -9507,8 +9507,8 @@ define void @urshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sanit
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 3>
@@ -9542,8 +9542,8 @@ define void @sqshlu_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sani
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 3>
@@ -9577,8 +9577,8 @@ define void @sshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) saniti
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 3>
@@ -9612,8 +9612,8 @@ define void @ushl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) saniti
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 3>
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll
index 8fed5a7..ef20040 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll
@@ -37,16 +37,16 @@ target triple = "aarch64--linux-android9001"
define void @st1x2_v1f64(<1 x double> %A, <1 x double> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x2_v1f64(
; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1:![0-9]+]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
; CHECK-NEXT: unreachable
@@ -61,16 +61,16 @@ define void @st1x2_v1f64(<1 x double> %A, <1 x double> %B, ptr %p) sanitize_memo
define void @st1x2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x2_v1i64(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -85,16 +85,16 @@ define void @st1x2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %p) sanitize_memory {
define void @st1x2_v2f64(<2 x double> %A, <2 x double> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x2_v2f64(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -109,16 +109,16 @@ define void @st1x2_v2f64(<2 x double> %A, <2 x double> %B, ptr %p) sanitize_memo
define void @st1x2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x2_v2i64(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -133,17 +133,17 @@ define void @st1x2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %p) sanitize_memory {
define void @st1x3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x3_v1f64(
; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], <1 x double> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -158,17 +158,17 @@ define void @st1x3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr
define void @st1x3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x3_v1i64(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -183,17 +183,17 @@ define void @st1x3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %p) sanit
define void @st1x3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x3_v2f64(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -208,17 +208,17 @@ define void @st1x3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr
define void @st1x3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x3_v2i64(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -233,18 +233,18 @@ define void @st1x3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %p) sanit
define void @st1x4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x4_v1f64(
; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], <1 x double> [[C:%.*]], <1 x double> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -259,18 +259,18 @@ define void @st1x4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x
define void @st1x4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x4_v1i64(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -285,18 +285,18 @@ define void @st1x4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D,
define void @st1x4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x4_v2f64(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], <2 x double> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -311,18 +311,18 @@ define void @st1x4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x
define void @st1x4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st1x4_v2i64(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -337,16 +337,16 @@ define void @st1x4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D,
define void @st2_v16i8(<16 x i8> %A, <16 x i8> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v16i8(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -361,16 +361,16 @@ define void @st2_v16i8(<16 x i8> %A, <16 x i8> %B, ptr %p) sanitize_memory {
define void @st2_v1f64(<1 x double> %A, <1 x double> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v1f64(
; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -385,16 +385,16 @@ define void @st2_v1f64(<1 x double> %A, <1 x double> %B, ptr %p) sanitize_memory
define void @st2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v1i64(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -409,16 +409,16 @@ define void @st2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %p) sanitize_memory {
define void @st2_v2f32(<2 x float> %A, <2 x float> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v2f32(
; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -433,16 +433,16 @@ define void @st2_v2f32(<2 x float> %A, <2 x float> %B, ptr %p) sanitize_memory {
define void @st2_v2f64(<2 x double> %A, <2 x double> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v2f64(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -457,16 +457,16 @@ define void @st2_v2f64(<2 x double> %A, <2 x double> %B, ptr %p) sanitize_memory
define void @st2_v2i32(<2 x i32> %A, <2 x i32> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v2i32(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -481,16 +481,16 @@ define void @st2_v2i32(<2 x i32> %A, <2 x i32> %B, ptr %p) sanitize_memory {
define void @st2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v2i64(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -505,16 +505,16 @@ define void @st2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %p) sanitize_memory {
define void @st2_v4f16(<4 x half> %A, <4 x half> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v4f16(
; CHECK-SAME: <4 x half> [[A:%.*]], <4 x half> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -529,16 +529,16 @@ define void @st2_v4f16(<4 x half> %A, <4 x half> %B, ptr %p) sanitize_memory {
define void @st2_v4f32(<4 x float> %A, <4 x float> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v4f32(
; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -553,16 +553,16 @@ define void @st2_v4f32(<4 x float> %A, <4 x float> %B, ptr %p) sanitize_memory {
define void @st2_v4i16(<4 x i16> %A, <4 x i16> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v4i16(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -577,16 +577,16 @@ define void @st2_v4i16(<4 x i16> %A, <4 x i16> %B, ptr %p) sanitize_memory {
define void @st2_v4i32(<4 x i32> %A, <4 x i32> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v4i32(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -601,16 +601,16 @@ define void @st2_v4i32(<4 x i32> %A, <4 x i32> %B, ptr %p) sanitize_memory {
define void @st2_v8f16(<8 x half> %A, <8 x half> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v8f16(
; CHECK-SAME: <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -625,16 +625,16 @@ define void @st2_v8f16(<8 x half> %A, <8 x half> %B, ptr %p) sanitize_memory {
define void @st2_v8i16(<8 x i16> %A, <8 x i16> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v8i16(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -649,16 +649,16 @@ define void @st2_v8i16(<8 x i16> %A, <8 x i16> %B, ptr %p) sanitize_memory {
define void @st2_v8i8(<8 x i8> %A, <8 x i8> %B, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st2_v8i8(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], ptr [[TMP6]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -673,17 +673,17 @@ define void @st2_v8i8(<8 x i8> %A, <8 x i8> %B, ptr %p) sanitize_memory {
define void @st3_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v16i8(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -698,17 +698,17 @@ define void @st3_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %p) sanitiz
define void @st3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v1f64(
; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], <1 x double> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -723,17 +723,17 @@ define void @st3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %p
define void @st3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v1i64(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -748,17 +748,17 @@ define void @st3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %p) sanitiz
define void @st3_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v2f32(
; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], <2 x float> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -773,17 +773,17 @@ define void @st3_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, ptr %p) s
define void @st3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v2f64(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -798,17 +798,17 @@ define void @st3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %p
define void @st3_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v2i32(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -823,17 +823,17 @@ define void @st3_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %p) sanitiz
define void @st3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v2i64(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -848,17 +848,17 @@ define void @st3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %p) sanitiz
define void @st3_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v4f16(
; CHECK-SAME: <4 x half> [[A:%.*]], <4 x half> [[B:%.*]], <4 x half> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i16> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -873,17 +873,17 @@ define void @st3_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr %p) sani
define void @st3_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v4f32(
; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -898,17 +898,17 @@ define void @st3_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, ptr %p) s
define void @st3_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v4i16(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i16> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -923,17 +923,17 @@ define void @st3_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %p) sanitiz
define void @st3_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v4i32(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -948,17 +948,17 @@ define void @st3_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %p) sanitiz
define void @st3_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v8f16(
; CHECK-SAME: <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -973,17 +973,17 @@ define void @st3_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, ptr %p) sani
define void @st3_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v8i16(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -998,17 +998,17 @@ define void @st3_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %p) sanitiz
define void @st3_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st3_v8i8(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], ptr [[TMP7]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1023,18 +1023,18 @@ define void @st3_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %p) sanitize_me
define void @st4_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v16i8(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1049,18 +1049,18 @@ define void @st4_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, p
define void @st4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v1f64(
; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], <1 x double> [[C:%.*]], <1 x double> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1075,18 +1075,18 @@ define void @st4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x d
define void @st4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v1i64(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1101,18 +1101,18 @@ define void @st4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, p
define void @st4_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v2f32(
; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], <2 x float> [[C:%.*]], <2 x float> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1127,18 +1127,18 @@ define void @st4_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x floa
define void @st4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v2f64(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], <2 x double> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1153,18 +1153,18 @@ define void @st4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x d
define void @st4_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v2i32(
; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1179,18 +1179,18 @@ define void @st4_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, p
define void @st4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v2i64(
; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1205,18 +1205,18 @@ define void @st4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, p
define void @st4_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, <4 x half> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v4f16(
; CHECK-SAME: <4 x half> [[A:%.*]], <4 x half> [[B:%.*]], <4 x half> [[C:%.*]], <4 x half> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1231,18 +1231,18 @@ define void @st4_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, <4 x half> %
define void @st4_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v4f32(
; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], <4 x float> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1257,18 +1257,18 @@ define void @st4_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x floa
define void @st4_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v4i16(
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], <4 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1283,18 +1283,18 @@ define void @st4_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, p
define void @st4_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v4i32(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1309,18 +1309,18 @@ define void @st4_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, p
define void @st4_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, <8 x half> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v8f16(
; CHECK-SAME: <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]], <8 x half> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1335,18 +1335,18 @@ define void @st4_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, <8 x half> %
define void @st4_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v8i16(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1361,18 +1361,18 @@ define void @st4_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, p
define void @st4_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %p) sanitize_memory {
; CHECK-LABEL: define void @st4_v8i8(
; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], <8 x i8> [[TMP5]], ptr [[TMP8]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
@@ -1384,5 +1384,5 @@ define void @st4_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %p
ret void
}
;.
-; CHECK: [[PROF0]] = !{!"branch_weights", i32 1, i32 1048575}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
;.
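
The hunks above are mechanical updates to autogenerated CHECK lines: the MSan shadow-TLS address is now printed as a getelementptr constant expression instead of a ptrtoint/add/inttoptr chain, and the branch-weights metadata node is renumbered from [[PROF0]] to [[PROF1]]. The weights themselves are unchanged; 1 versus 1048575 keeps the __msan_warning_noreturn path cold. A minimal sketch of the address equivalence, using a hypothetical function and offset rather than lines from the patch (the [100 x i64] type matches the 800-byte shadow array the tests assume):

; Both spellings denote byte offset 16 into the parameter shadow:
;   old: inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr)
;   new: getelementptr (i8, ptr @__msan_param_tls, i64 16)
@__msan_param_tls = external thread_local global [100 x i64]

define <4 x i32> @load_param_shadow_sketch() {
  ; load the shadow of a vector argument whose slot starts 16 bytes in
  %s = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
  ret <4 x i32> %s
}

With opaque pointers the getelementptr form is the canonical spelling and avoids the pointer/integer round-trip, which is why the checks change wholesale.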
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
index f3cceb7c..b8e54a7 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
@@ -61,17 +61,17 @@ define i32 @bar() {
; array. General purpose registers are saved at positions from 0 to 64, floating
; point and SIMD are saved from 64 to 192, and the remaining arguments from 192.
; (A sketch of this layout follows this file's diff.)
; CHECK-LABEL: @bar
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 8
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 16
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 64
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 80
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 24
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 32
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 96
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 40
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 48
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 56
-; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 192
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 8
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 16
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 64
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 80
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 24
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 32
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 96
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 40
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 48
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 56
+; CHECK: store {{.*}} @__msan_va_arg_tls, i64 192
; CHECK: store {{.*}} 8, {{.*}} @__msan_va_arg_overflow_size_tls
; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
@@ -97,6 +97,6 @@ entry:
}
; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
-; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792)
+; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800)
declare i64 @sum(i64 %n, ...)
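
The pinned offsets in the CHECK lines above follow the AArch64 register-save layout described in the comment: eight 8-byte general-purpose slots occupy bytes 0-64 of @__msan_va_arg_tls, eight 16-byte SIMD/FP slots occupy bytes 64-192, and stack-passed varargs start at byte 192. The 792/800 pair in the last hunk guards the array's total size: with 800 bytes, 792 is the last in-bounds 8-byte slot. A small sketch of where each class of shadow lands (illustrative stores under those layout assumptions, not lines from the patch):

@__msan_va_arg_tls = external thread_local global [100 x i64]

define void @va_layout_sketch() {
  ; shadow of the second GP argument register (x1): second 8-byte slot
  store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
  ; shadow of the first SIMD/FP argument register (q0): 16-byte slots begin at 64
  store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8
  ; shadow of the first stack-passed vararg: overflow area begins at 192
  store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8
  ret void
}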
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
index 06a34ac..d246e96 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
@@ -39,9 +39,9 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef %arg) sanitize_memo
; CHECK-NEXT: [[_MSPROP:%.*]] = zext i8 [[_MSLD]] to i32
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP7]] to i32
; CHECK-NEXT: store i8 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef [[TMP7]], i32 noundef 1, i32 noundef [[CONV]])
; CHECK-NEXT: ret void
@@ -80,9 +80,9 @@ define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_mem
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]])
; CHECK-NEXT: ret void
@@ -122,9 +122,9 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m
; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[TMP7]] to double
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]])
; CHECK-NEXT: ret void
@@ -163,9 +163,9 @@ define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]])
; CHECK-NEXT: ret void
@@ -203,9 +203,9 @@ define linkonce_odr dso_local void @_Z4testIeEvT_(fp128 noundef %arg) sanitize_m
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i128, ptr [[TMP10]], align 16
; CHECK-NEXT: store i128 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i128 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i128 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i128 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i128 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (fp128, i32, ...) @_Z5test2IeEvT_iz(fp128 noundef [[TMP7]], i32 noundef 1, fp128 noundef [[TMP7]])
; CHECK-NEXT: ret void
@@ -243,9 +243,9 @@ define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitiz
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]])
; CHECK-NEXT: ret void
@@ -302,9 +302,9 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_([2 x i64] %arg.coer
; CHECK-NEXT: [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1
; CHECK-NEXT: [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x i64] [[DOTFCA_0_INSERT2]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void ([2 x i64], i32, ...) @_Z5test2I10Int64Int64EvT_iz([2 x i64] [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x i64] [[DOTFCA_1_INSERT3]])
; CHECK-NEXT: ret void
@@ -368,9 +368,9 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_([2 x double] alig
; CHECK-NEXT: [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1
; CHECK-NEXT: [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x double] [[DOTFCA_0_INSERT2]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void ([2 x double], i32, ...) @_Z5test2I12DoubleDoubleEvT_iz([2 x double] alignstack(8) [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x double] alignstack(8) [[DOTFCA_1_INSERT3]])
; CHECK-NEXT: ret void
@@ -464,9 +464,9 @@ define linkonce_odr dso_local void @_Z4testI7Double4EvT_([4 x double] alignstack
; CHECK-NEXT: [[TMP35:%.*]] = insertvalue [4 x i64] [[TMP34]], i64 [[_MSLD3]], 3
; CHECK-NEXT: [[DOTFCA_3_INSERT7:%.*]] = insertvalue [4 x double] [[DOTFCA_2_INSERT6]], double [[AGG_TMP_SROA_4_0_COPYLOAD]], 3
; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void ([4 x double], i32, ...) @_Z5test2I7Double4EvT_iz([4 x double] alignstack(8) [[DOTFCA_3_INSERT7]], i32 noundef 1, [4 x double] alignstack(8) [[DOTFCA_3_INSERT7]])
; CHECK-NEXT: ret void
@@ -540,9 +540,9 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_([2 x i64] %arg.coe
; CHECK-NEXT: [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1
; CHECK-NEXT: [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x i64] [[DOTFCA_0_INSERT2]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void ([2 x i64], i32, ...) @_Z5test2I11DoubleFloatEvT_iz([2 x i64] [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x i64] [[DOTFCA_1_INSERT3]])
; CHECK-NEXT: ret void
@@ -606,9 +606,9 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_([2 x fp128] aligns
; CHECK-NEXT: [[TMP19:%.*]] = insertvalue [2 x i128] [[TMP18]], i128 [[_MSLD1]], 1
; CHECK-NEXT: [[DOTFCA_1_INSERT5:%.*]] = insertvalue [2 x fp128] [[DOTFCA_0_INSERT4]], fp128 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void ([2 x fp128], i32, ...) @_Z5test2I11LongDouble2EvT_iz([2 x fp128] alignstack(16) [[DOTFCA_1_INSERT5]], i32 noundef 1, [2 x fp128] alignstack(16) [[DOTFCA_1_INSERT5]])
; CHECK-NEXT: ret void
@@ -702,9 +702,9 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_([4 x fp128] aligns
; CHECK-NEXT: [[TMP35:%.*]] = insertvalue [4 x i128] [[TMP34]], i128 [[_MSLD3]], 3
; CHECK-NEXT: [[DOTFCA_3_INSERT7:%.*]] = insertvalue [4 x fp128] [[DOTFCA_2_INSERT6]], fp128 [[AGG_TMP_SROA_4_0_COPYLOAD]], 3
; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void ([4 x fp128], i32, ...) @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) [[DOTFCA_3_INSERT7]], i32 noundef 1, [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT7]])
; CHECK-NEXT: ret void
@@ -759,29 +759,19 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
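
In these callee-side hunks the same cleanup applies to instructions rather than constant expressions: each ptrtoint/add/inttoptr triple on the va_list alloca collapses into one byte-wise getelementptr, which is also why the autogenerated TMP numbering shifts. The offsets match the AArch64 va_list layout (__stack at 0, __gr_top at 8, __vr_top at 16, __gr_offs at 24, __vr_offs at 28). A minimal sketch of the rewrite on a hypothetical va_list pointer:

define i64 @va_list_gep_sketch(ptr %args) {
  ; old form, three instructions:
  ;   %a = ptrtoint ptr %args to i64
  ;   %b = add i64 %a, 8
  ;   %c = inttoptr i64 %b to ptr
  ; new form, one instruction computing the same address (__gr_top):
  %c = getelementptr i8, ptr %args, i64 8
  %v = load i64, ptr %c, align 8
  ret i64 %v
}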
@@ -852,29 +842,19 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -937,29 +917,19 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1022,29 +992,19 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1107,29 +1067,19 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1192,29 +1142,19 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1277,29 +1217,19 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1362,29 +1292,19 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1447,29 +1367,19 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1532,29 +1442,19 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1617,29 +1517,19 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1702,29 +1592,19 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0
; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28
-; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28
; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
@@ -1838,29 +1718,29 @@ define linkonce_odr dso_local void @_Z4test2I11LongDouble4EvT_([4 x fp128] align
; CHECK-NEXT: [[TMP35:%.*]] = insertvalue [4 x i128] [[TMP34]], i128 [[_MSLD3]], 3
; CHECK-NEXT: [[DOTFCA_3_INSERT121:%.*]] = insertvalue [4 x fp128] [[DOTFCA_2_INSERT120]], fp128 [[AGG_TMP_SROA_4_0_COPYLOAD]], 3
; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), i8 0, i32 32, i1 false)
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8
+; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), i8 0, i32 32, i1 false)
; CHECK-NEXT: store i64 1216, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void ([4 x fp128], i32, ...) @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], i32 noundef 20, [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]])
; CHECK-NEXT: ret void
diff --git a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
index e05018c..cbdae25 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
@@ -59,12 +59,12 @@ define i32 @bar() {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -87,11 +87,11 @@ define i32 @bar2() {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -112,205 +112,205 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8
; CHECK-NEXT: store i64 960, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1)
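; The replaced check lines above encode the same addresses as before; only the
; constant-expression spelling changed. A minimal side-by-side sketch for one
; offset (both lines are taken verbatim from the old and new checks):
;   old: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8
;   new: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8
; Both constants address byte 792 of @__msan_va_arg_tls; the getelementptr form
; simply drops the ptrtoint/inttoptr round-trip.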
diff --git a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
index e6d3a4b..a0dcefd 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
@@ -9,12 +10,36 @@ declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @foo(i32 %guard, ...) {
-; CHECK-LABEL: @foo
-; CHECK: [[TMP1:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
-; CHECK: [[TMP3:%.*]] = alloca {{.*}} [[TMP1]]
-; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 [[TMP1]], i1 false)
-; CHECK: [[TMP4:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP4]], i1 false)
+; CHECK-LABEL: define i32 @foo(
+; CHECK-SAME: i32 [[GUARD:%.*]], ...) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
call void @llvm.lifetime.start.p0(ptr %vl)
@@ -27,11 +52,22 @@ define i32 @foo(i32 %guard, ...) {
;; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
;; array.
define i32 @bar() {
-; CHECK-LABEL: @bar
-; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
+; CHECK-LABEL: define i32 @bar() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
;
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
@@ -40,23 +76,36 @@ define i32 @bar() {
;; Check multiple fixed arguments.
declare i32 @foo2(i32 %g1, i32 %g2, ...)
define i32 @bar2() {
-; CHECK-LABEL: @bar2
-; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
+; CHECK-LABEL: define i32 @bar2() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
;
%1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
+; UTC_ARGS: --disable
+
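; Note on the marker above, assuming the documented utils/update_test_checks.py
; behavior: `UTC_ARGS: --disable` stops check regeneration from this point on,
; so the hand-written many_args checks below survive future reruns; a later
; `; UTC_ARGS: --enable` line would resume autogeneration.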
;; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
;; passed to a variadic function.
declare i64 @sum(i64 %n, ...)
define dso_local i64 @many_args() {
-;; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+;; If the size of __msan_va_arg_tls changes, the offsets in the `getelementptr` checks below must also be changed.
; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792)
+; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800)
;
entry:
%ret = call i64 (i64, ...) @sum(i64 120,
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
index 69a74a3..1187531 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
@@ -1,9 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
target triple = "mips64--linux"
define i32 @foo(i32 %guard, ...) {
+; CHECK-LABEL: define i32 @foo(
+; CHECK-SAME: i32 [[GUARD:%.*]], ...) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 549755813888
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 549755813888
+; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 549755813888
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 0
+;
%vl = alloca ptr, align 8
call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
@@ -12,23 +44,29 @@ define i32 @foo(i32 %guard, ...) {
ret i32 0
}
-; First, check allocation of the save area.
-
-; CHECK-LABEL: @foo
-; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
-; CHECK: [[C:%.*]] = alloca {{.*}} [[A]]
-
-; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false)
-
-; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-
declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
+; CHECK-LABEL: define i32 @bar() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
@@ -36,23 +74,32 @@ define i32 @bar() {
; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
; array. The first argument is stored at position 4, since it's right
; justified.
-; CHECK-LABEL: @bar
-; CHECK: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check multiple fixed arguments.
declare i32 @foo2(i32 %g1, i32 %g2, ...)
define i32 @bar2() {
+; CHECK-LABEL: define i32 @bar2() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
-; CHECK-LABEL: @bar2
-; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
+
+; UTC_ARGS: --disable
; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
; passed to a variadic function.
@@ -77,8 +124,8 @@ entry:
-; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+; If the size of __msan_va_arg_tls changes, the offsets in the `getelementptr` checks below must also be changed.
; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792)
+; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800)
declare i64 @sum(i64 %n, ...)
; CHECK: declare void @__msan_maybe_warning_1(i8 signext, i32 signext)
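; Why the boundary sits at 792/800: the @foo checks cap the copied shadow with
; `llvm.umin.i64(i64 [[TMP1]], i64 800)`, i.e. the va-arg TLS buffer holds 800
; bytes, so the last in-bounds i64 slot starts at byte 800 - 8 = 792. Hence the
; many_args checks above expect:
;   CHECK:     getelementptr (i8, ptr @__msan_va_arg_tls, i64 792)   ; last slot written
;   CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800)   ; would overflow the buffer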
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
index b19da8e..a78285a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
@@ -1,9 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
target triple = "mips64el--linux"
define i32 @foo(i32 %guard, ...) {
+; CHECK-LABEL: define i32 @foo(
+; CHECK-SAME: i32 [[GUARD:%.*]], ...) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 549755813888
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 549755813888
+; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 549755813888
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 0
+;
%vl = alloca ptr, align 8
call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
@@ -12,46 +44,60 @@ define i32 @foo(i32 %guard, ...) {
ret i32 0
}
-; First, check allocation of the save area.
-
-; CHECK-LABEL: @foo
-; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
-; CHECK: [[C:%.*]] = alloca {{.*}} [[A]]
-
-; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false)
-
-; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-
declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
+; CHECK-LABEL: define i32 @bar() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
; array.
-; CHECK-LABEL: @bar
-; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check multiple fixed arguments.
declare i32 @foo2(i32 %g1, i32 %g2, ...)
define i32 @bar2() {
+; CHECK-LABEL: define i32 @bar2() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
-; CHECK-LABEL: @bar2
-; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
+; UTC_ARGS: --disable
; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
; passed to a variadic function.
@@ -76,6 +122,6 @@ entry:
-; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+; If the size of __msan_va_arg_tls changes, the offsets in the `getelementptr` checks below must also be changed.
; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792)
+; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800)
declare i64 @sum(i64 %n, ...)
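; Endianness footnote for the two MIPS files above: a va-arg shadow slot is 8
; bytes wide, and an i32 shadow fills only 4 of them. Big-endian mips64 stores
; the value right-justified in its slot, at byte 8 - 4 = 4, whereas little-endian
; mips64el stores it at byte 0; compare the first va-arg store in each bar():
;   mips64:   store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8
;   mips64el: store i32 0, ptr @__msan_va_arg_tls, align 8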
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
index 4d47b02..9257622f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
@@ -59,12 +59,12 @@ define i32 @bar() {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -87,11 +87,11 @@ define i32 @bar2() {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -112,205 +112,205 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8
; CHECK-NEXT: store i64 960, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1)
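; NOTE: a minimal sketch of the equivalence these regenerated checks rely
; on, using a hypothetical global @g (not part of any test above). Both
; constant expressions below denote the same byte address; the updated
; MSan instrumentation simply emits the canonical byte-offset
; getelementptr form instead of the ptrtoint/add/inttoptr round trip:
;
;   @g = external global [800 x i8]
;   ; old form: ptr inttoptr (i64 add (i64 ptrtoint (ptr @g to i64), i64 8) to ptr)
;   ; new form: ptr getelementptr (i8, ptr @g, i64 8)
;
; The getelementptr form is also friendlier to alias analysis, since it
; keeps the pointer provenance that an integer round trip would discard.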
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
index 98294e7..690dc2a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
@@ -59,12 +59,12 @@ define i32 @bar() {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -86,11 +86,11 @@ define i32 @bar2() {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -111,205 +111,205 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8
; CHECK-NEXT: store i64 960, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
index 9351067..6dc896f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
@@ -1,9 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64--linux"
define i32 @foo(i32 %guard, ...) {
+; CHECK-LABEL: define i32 @foo(
+; CHECK-SAME: i32 [[GUARD:%.*]], ...) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -246290604621825
+; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 17592186044416
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], 8796093022208
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP8]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -246290604621825
+; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP10]], 17592186044416
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 8796093022208
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP13]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8
+; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64
+; CHECK-NEXT: [[TMP18:%.*]] = and i64 [[TMP17]], -246290604621825
+; CHECK-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], 17592186044416
+; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 8796093022208
+; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP21]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 0
+;
%vl = alloca ptr, align 8
call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
@@ -12,23 +50,29 @@ define i32 @foo(i32 %guard, ...) {
ret i32 0
}
-; First, check allocation of the save area.
-
-; CHECK-LABEL: @foo
-; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
-; CHECK: [[C:%.*]] = alloca {{.*}} [[A]]
-
-; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false)
-
-; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-
declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
+; CHECK-LABEL: define i32 @bar() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
@@ -36,14 +80,22 @@ define i32 @bar() {
; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
; array. The first argument is stored at position 4, since it's right
; justified.
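; Illustrative arithmetic (not checked by FileCheck): each vararg occupies an
; 8-byte slot of __msan_va_arg_tls, and on big-endian PPC64 the 4-byte i32 is
; right-justified within its slot, so its shadow starts at 8 - 4 = 4, while
; the following i64 and double shadows start at offsets 8 and 16.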
-; CHECK-LABEL: @bar
-; CHECK: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check vector argument.
define i32 @bar2() {
+; CHECK-LABEL: define i32 @bar2() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
ret i32 %1
}
@@ -51,50 +103,110 @@ define i32 @bar2() {
-; The vector is at offset 16 of parameter save area, but __msan_va_arg_tls
-; corresponds to offset 8+ of parameter save area - so the offset from
-; __msan_va_arg_tls is actually misaligned.
+; The vector is at offset 16 of the parameter save area, but __msan_va_arg_tls
+; corresponds to offsets 8 and up of the parameter save area, so the offset
+; from __msan_va_arg_tls is actually misaligned.
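; Illustrative arithmetic (not checked by FileCheck): the 16-byte-aligned
; vector sits at save-area offset 16, and __msan_va_arg_tls mirrors the save
; area starting at offset 8, so its shadow lands at 16 - 8 = 8, an address
; that is only 8-byte aligned.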
-; CHECK-LABEL: @bar2
-; CHECK: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check i64 array.
define i32 @bar4() {
+; CHECK-LABEL: define i32 @bar4() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
ret i32 %1
}
-; CHECK-LABEL: @bar4
-; CHECK: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8
-; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
; Check i128 array.
define i32 @bar5() {
+; CHECK-LABEL: define i32 @bar5() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
ret i32 %1
}
-; CHECK-LABEL: @bar5
-; CHECK: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
; Check 8-aligned byval.
define i32 @bar6(ptr %arg) {
+; CHECK-LABEL: define i32 @bar6(
+; CHECK-SAME: ptr [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -246290604621825
+; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 17592186044416
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 8796093022208
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i8 0, i64 16, i1 false)
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -246290604621825
+; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 17592186044416
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8796093022208
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP11]], i64 16, i1 false)
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP12]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 %arg)
ret i32 %1
}
-; CHECK-LABEL: @bar6
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 {{.*}}, i64 16, i1 false)
-; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
; Check 16-aligned byval.
define i32 @bar7(ptr %arg) {
+; CHECK-LABEL: define i32 @bar7(
+; CHECK-SAME: ptr [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -246290604621825
+; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 17592186044416
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 8796093022208
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i8 0, i64 32, i1 false)
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -246290604621825
+; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 17592186044416
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8796093022208
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), ptr align 8 [[TMP11]], i64 32, i1 false)
+; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP12]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 %arg)
ret i32 %1
}
-; CHECK-LABEL: @bar7
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 {{.*}}, i64 32, i1 false)
-; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
+; UTC_ARGS: --disable
; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
; passed to a variadic function.
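; Illustrative arithmetic (not checked by FileCheck): __msan_va_arg_tls holds
; 800 bytes (100 x i64 slots), so with the 120 i64 varargs passed here only
; the first 100 shadow slots are written (the last store is at offset 792),
; while the full 120 * 8 = 960 bytes are still reported via
; __msan_va_arg_overflow_size_tls; the callee-side copy out of the buffer is
; likewise capped at 800 bytes via @llvm.umin.i64.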
@@ -119,6 +231,6 @@ entry:
-; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+; If the size of __msan_va_arg_tls changes, the byte offset in the `getelementptr` below must also be changed.
; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792)
+; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800)
declare i64 @sum(i64 %n, ...)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
index 4151f3b..e3db97c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
@@ -1,9 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le--linux"
define i32 @foo(i32 %guard, ...) {
+; CHECK-LABEL: define i32 @foo(
+; CHECK-SAME: i32 [[GUARD:%.*]], ...) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -246290604621825
+; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 17592186044416
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], 8796093022208
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP8]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -246290604621825
+; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP10]], 17592186044416
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 8796093022208
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP13]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8
+; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64
+; CHECK-NEXT: [[TMP18:%.*]] = and i64 [[TMP17]], -246290604621825
+; CHECK-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], 17592186044416
+; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 8796093022208
+; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP21]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 0
+;
%vl = alloca ptr, align 8
call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
@@ -12,37 +50,51 @@ define i32 @foo(i32 %guard, ...) {
ret i32 0
}
-; First, check allocation of the save area.
-
-; CHECK-LABEL: @foo
-; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
-; CHECK: [[C:%.*]] = alloca {{.*}} [[A]]
-
-; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false)
-
-; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-
declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
+; CHECK-LABEL: define i32 @bar() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
ret i32 %1
}
; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
; array.
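; Illustrative contrast with the big-endian test above (not checked by
; FileCheck): on little-endian PPC64 the 4-byte i32 shadow is stored at the
; start of its 8-byte slot, offset 0, rather than right-justified at offset 4.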
-; CHECK-LABEL: @bar
-; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check vector argument.
define i32 @bar2() {
+; CHECK-LABEL: define i32 @bar2() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
ret i32 %1
}
@@ -50,49 +102,109 @@ define i32 @bar2() {
-; The vector is at offset 16 of parameter save area, but __msan_va_arg_tls
-; corresponds to offset 8+ of parameter save area - so the offset from
-; __msan_va_arg_tls is actually misaligned.
+; The vector is at offset 16 of the parameter save area, but __msan_va_arg_tls
+; corresponds to offsets 8 and up of the parameter save area, so the offset
+; from __msan_va_arg_tls is actually misaligned.
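; (Same illustrative arithmetic as in the big-endian test: shadow offset
; 16 - 8 = 8, which is not 16-byte aligned.)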
-; CHECK-LABEL: @bar2
-; CHECK: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
; Check i64 array.
define i32 @bar4() {
+; CHECK-LABEL: define i32 @bar4() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
ret i32 %1
}
-; CHECK-LABEL: @bar4
-; CHECK: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8
-; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
; Check i128 array.
define i32 @bar5() {
+; CHECK-LABEL: define i32 @bar5() {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
ret i32 %1
}
-; CHECK-LABEL: @bar5
-; CHECK: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
; Check 8-aligned byval.
define i32 @bar6(ptr %arg) {
+; CHECK-LABEL: define i32 @bar6(
+; CHECK-SAME: ptr [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -246290604621825
+; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 17592186044416
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 8796093022208
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i8 0, i64 16, i1 false)
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -246290604621825
+; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 17592186044416
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8796093022208
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP11]], i64 16, i1 false)
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP12]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 %arg)
ret i32 %1
}
-; CHECK-LABEL: @bar6
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 {{.*}}, i64 16, i1 false)
-; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
; Check 16-aligned byval.
define i32 @bar7(ptr %arg) {
+; CHECK-LABEL: define i32 @bar7(
+; CHECK-SAME: ptr [[ARG:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -246290604621825
+; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 17592186044416
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 8796093022208
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i8 0, i64 32, i1 false)
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -246290604621825
+; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 17592186044416
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8796093022208
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), ptr align 8 [[TMP11]], i64 32, i1 false)
+; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret i32 [[TMP12]]
+;
%1 = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 %arg)
ret i32 %1
}
-; CHECK-LABEL: @bar7
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 {{.*}}, i64 32, i1 false)
-; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
+; UTC_ARGS: --disable
; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
; passed to a variadic function.
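; (Same illustrative bound as in the big-endian test: stores are clamped to
; the 800-byte __msan_va_arg_tls, so the last shadow store is at offset 792
; and none is emitted at 800.)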
@@ -117,6 +229,6 @@ entry:
-; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+; If the size of __msan_va_arg_tls changes, the byte offset in the `getelementptr` below must also be changed.
; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+; CHECK: ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792)
+; CHECK-NOT: ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 800)
declare i64 @sum(i64 %n, ...)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
index 1c74431..8ba0330 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
@@ -16,39 +16,33 @@ define void @Store1(ptr %p, i8 %x) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8
-; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8
; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[_MSARG1]], align 8
-; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8
-; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: [[BB12]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2:[0-9]+]]
-; CHECK-NEXT: br label %[[BB13]]
-; CHECK: [[BB13]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: br label %[[BB7]]
+; CHECK: [[BB7]]:
; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[P]])
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1
; CHECK-NEXT: store i8 [[TMP9]], ptr [[TMP16]], align 1
; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i8 [[TMP9]], 0
-; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB19:.*]], !prof [[PROF1]]
-; CHECK: [[BB17]]:
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB13:.*]], !prof [[PROF1]]
+; CHECK: [[BB11]]:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]])
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 4
-; CHECK-NEXT: br label %[[BB19]]
-; CHECK: [[BB19]]:
+; CHECK-NEXT: br label %[[BB13]]
+; CHECK: [[BB13]]:
; CHECK-NEXT: store i8 [[X]], ptr [[P]], align 1
; CHECK-NEXT: ret void
;
@@ -70,39 +64,33 @@ define void @Store2(ptr %p, i16 %x) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8
-; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[_MSARG1]], align 8
-; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8
-; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1]]
-; CHECK: [[BB12]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB13]]
-; CHECK: [[BB13]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB7]]
+; CHECK: [[BB7]]:
; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_2(ptr [[P]])
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1
; CHECK-NEXT: store i16 [[TMP9]], ptr [[TMP16]], align 2
; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i16 [[TMP9]], 0
-; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB19:.*]], !prof [[PROF1]]
-; CHECK: [[BB17]]:
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB13:.*]], !prof [[PROF1]]
+; CHECK: [[BB11]]:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]])
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 4
-; CHECK-NEXT: br label %[[BB19]]
-; CHECK: [[BB19]]:
+; CHECK-NEXT: br label %[[BB13]]
+; CHECK: [[BB13]]:
; CHECK-NEXT: store i16 [[X]], ptr [[P]], align 2
; CHECK-NEXT: ret void
;
@@ -124,39 +112,33 @@ define void @Store4(ptr %p, i32 %x) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8
-; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[_MSARG1]], align 8
-; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8
-; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1]]
-; CHECK: [[BB12]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB13]]
-; CHECK: [[BB13]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB7]]
+; CHECK: [[BB7]]:
; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr [[P]])
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1
; CHECK-NEXT: store i32 [[TMP9]], ptr [[TMP16]], align 4
; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i32 [[TMP9]], 0
-; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB19:.*]], !prof [[PROF1]]
-; CHECK: [[BB17]]:
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB13:.*]], !prof [[PROF1]]
+; CHECK: [[BB11]]:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]])
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 4
-; CHECK-NEXT: br label %[[BB19]]
-; CHECK: [[BB19]]:
+; CHECK-NEXT: br label %[[BB13]]
+; CHECK: [[BB13]]:
; CHECK-NEXT: store i32 [[X]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
@@ -178,41 +160,35 @@ define void @Store8(ptr %p, i64 %x) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8
-; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[_MSARG1]], align 8
-; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8
-; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1]]
-; CHECK: [[BB12]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB13]]
-; CHECK: [[BB13]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB7]]
+; CHECK: [[BB7]]:
; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr [[P]])
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1
; CHECK-NEXT: store i64 [[TMP9]], ptr [[TMP16]], align 8
; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB20:.*]], !prof [[PROF1]]
-; CHECK: [[BB17]]:
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB14:.*]], !prof [[PROF1]]
+; CHECK: [[BB11]]:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]])
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 8
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP17]], i32 1
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP20]], align 4
-; CHECK-NEXT: br label %[[BB20]]
-; CHECK: [[BB20]]:
+; CHECK-NEXT: br label %[[BB14]]
+; CHECK: [[BB14]]:
; CHECK-NEXT: store i64 [[X]], ptr [[P]], align 8
; CHECK-NEXT: ret void
;
@@ -234,35 +210,29 @@ define void @Store16(ptr %p, i128 %x) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8
-; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8
; CHECK-NEXT: [[TMP9:%.*]] = load i128, ptr [[_MSARG1]], align 8
-; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8
-; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1]]
-; CHECK: [[BB12]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB13]]
-; CHECK: [[BB13]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB7]]
+; CHECK: [[BB7]]:
; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_n(ptr [[P]], i32 16)
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1
; CHECK-NEXT: store i128 [[TMP9]], ptr [[TMP16]], align 8
; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i128 [[TMP9]], 0
-; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB22:.*]], !prof [[PROF1]]
-; CHECK: [[BB17]]:
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB16:.*]], !prof [[PROF1]]
+; CHECK: [[BB11]]:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]])
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 8
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP17]], i32 1
@@ -271,8 +241,8 @@ define void @Store16(ptr %p, i128 %x) sanitize_memory {
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP17]], i32 3
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP21]], align 4
-; CHECK-NEXT: br label %[[BB22]]
-; CHECK: [[BB22]]:
+; CHECK-NEXT: br label %[[BB16]]
+; CHECK: [[BB16]]:
; CHECK-NEXT: store i128 [[X]], ptr [[P]], align 8
; CHECK-NEXT: ret void
;
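; NOTE (editorial, not part of the test): unlike userspace MSan, the kernel
; (kmsan) flavor checked in this file resolves shadow and origin for a memory
; operand through a runtime callback that returns both pointers at once, as in
; the unchanged lines above:
;
;   %meta   = call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr %p)
;   %shadow = extractvalue { ptr, ptr } %meta, 0
;   %origin = extractvalue { ptr, ptr } %meta, 1
;
; Those callbacks are untouched by this diff; only the TLS slot addressing
; ahead of them moves from inttoptr arithmetic to getelementptr.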
@@ -294,20 +264,18 @@ define i8 @Load1(ptr %p) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
-; CHECK: [[BB6]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB7]]
-; CHECK: [[BB7]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB5]]
+; CHECK: [[BB5]]:
; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_1(ptr [[P]])
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0
@@ -336,20 +304,18 @@ define i16 @Load2(ptr %p) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
-; CHECK: [[BB6]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB7]]
-; CHECK: [[BB7]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB5]]
+; CHECK: [[BB5]]:
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[P]], align 2
; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_2(ptr [[P]])
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0
@@ -378,20 +344,18 @@ define i32 @Load4(ptr %p) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
-; CHECK: [[BB6]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB7]]
-; CHECK: [[BB7]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB5]]
+; CHECK: [[BB5]]:
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_4(ptr [[P]])
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0
@@ -420,20 +384,18 @@ define i64 @Load8(ptr %p) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
-; CHECK: [[BB6]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB7]]
-; CHECK: [[BB7]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB5]]
+; CHECK: [[BB5]]:
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[P]], align 8
; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_8(ptr [[P]])
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0
@@ -462,20 +424,18 @@ define i128 @Load16(ptr %p) sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32
-; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32
-; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
-; CHECK: [[BB6]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB7]]
-; CHECK: [[BB7]]:
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB5]]
+; CHECK: [[BB5]]:
; CHECK-NEXT: [[TMP9:%.*]] = load i128, ptr [[P]], align 8
; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_n(ptr [[P]], i32 16)
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
index 29d1fbd..26aaa1e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
@@ -76,12 +76,12 @@ define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 24) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 4) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 24), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 4), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
; CHECK-NEXT: store i32 16, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
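; NOTE (editorial, not part of the test): for the TLS globals in this file the
; same simplification shows up at constant-expression level. Both constants
; below name byte 8 of @__msan_param_tls; the getelementptr form is what the
; updated checks expect:
;
;   inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr)
;   getelementptr (i8, ptr @__msan_param_tls, i32 8)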
@@ -102,9 +102,9 @@ define i32 @bar2() {
; CHECK-LABEL: define i32 @bar2() {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
@@ -125,9 +125,9 @@ define i32 @bar4() {
; CHECK-LABEL: define i32 @bar4() {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
@@ -145,9 +145,9 @@ define i32 @bar5() {
; CHECK-LABEL: define i32 @bar5() {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
; CHECK-NEXT: store i32 40, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
@@ -166,15 +166,15 @@ define i32 @bar6(ptr %arg) {
; CHECK-SAME: ptr [[ARG:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 2147483647
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), i8 0, i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i32 8), i8 0, i64 16, i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 2147483647
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), ptr align 8 [[TMP7]], i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), ptr align 8 [[TMP7]], i64 16, i1 false)
; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]])
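; NOTE (editorial, not part of the test): for a byval aggregate the lowering
; stores no scalar shadow. It clears the argument's slot in @__msan_param_tls
; with llvm.memset and copies the argument's own shadow into @__msan_va_arg_tls
; with llvm.memcpy, sized to the aggregate (16 bytes for [2 x i64] above,
; 32 bytes for [4 x i64] in bar7 below). The source shadow address comes from
; the app-to-shadow mapping visible as [[TMP2]]..[[TMP4]]:
;
;   %appi   = ptrtoint ptr %arg to i32
;   %shi    = and i32 %appi, 2147483647   ; clear the top bit: app -> shadow
;   %shadow = inttoptr i32 %shi to ptr
;
; Only the destination offsets change in this diff: they are now
; getelementptr constants rather than inttoptr arithmetic.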
@@ -193,15 +193,15 @@ define i32 @bar7(ptr %arg) {
; CHECK-SAME: ptr [[ARG:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 2147483647
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), i8 0, i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i32 8), i8 0, i64 32, i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 2147483647
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), ptr align 8 [[TMP7]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), ptr align 8 [[TMP7]], i64 32, i1 false)
; CHECK-NEXT: store i32 40, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]])
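; NOTE (editorial, not part of the test): the many_args hunk below probes the
; fixed size of the parameter-shadow TLS block. Assuming the usual 800-byte
; kParamTLSSize, 8-byte slots give offsets 0, 8, ..., 792 (8 * 99 = 792), so
; the rewritten checks end at getelementptr (i8, ptr @__msan_param_tls, i32 792).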
@@ -222,205 +222,205 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 792) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 792), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 792), align 8
; CHECK-NEXT: store i32 968, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
index a4d2e16..24f9dc3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
@@ -76,12 +76,12 @@ define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 24) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 4) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 24), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 4), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
; CHECK-NEXT: store i32 16, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -101,9 +101,9 @@ define i32 @bar2() {
; CHECK-LABEL: define i32 @bar2() {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
@@ -124,9 +124,9 @@ define i32 @bar4() {
; CHECK-LABEL: define i32 @bar4() {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
@@ -144,9 +144,9 @@ define i32 @bar5() {
; CHECK-LABEL: define i32 @bar5() {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
; CHECK-NEXT: store i32 40, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
@@ -165,15 +165,15 @@ define i32 @bar6(ptr %arg) {
; CHECK-SAME: ptr [[ARG:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 2147483647
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), i8 0, i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i32 8), i8 0, i64 16, i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 2147483647
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), ptr align 8 [[TMP7]], i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), ptr align 8 [[TMP7]], i64 16, i1 false)
; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]])
@@ -192,15 +192,15 @@ define i32 @bar7(ptr %arg) {
; CHECK-SAME: ptr [[ARG:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 2147483647
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), i8 0, i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i32 8), i8 0, i64 32, i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 2147483647
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), ptr align 8 [[TMP7]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), ptr align 8 [[TMP7]], i64 32, i1 false)
; CHECK-NEXT: store i32 40, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]])
@@ -220,205 +220,205 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 792) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 792), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 792), align 8
; CHECK-NEXT: store i32 968, ptr @__msan_va_arg_overflow_size_tls, align 4
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
index 0c6e75c..f707135 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
@@ -59,12 +59,12 @@ define i32 @bar() {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -87,11 +87,11 @@ define i32 @bar2() {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -112,205 +112,205 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8
; CHECK-NEXT: store i64 960, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll
index 4454568..af8533c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll
@@ -13,7 +13,7 @@ target triple = "x86_64-unknown-linux-gnu"
define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_addsub_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
@@ -29,7 +29,7 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou
define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_addsub_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
@@ -44,8 +44,8 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi
define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_blendv_pd_256(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double> [[A2:%.*]] to <4 x i64>
@@ -72,8 +72,8 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_blendv_ps_256(
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x float> [[A2:%.*]] to <8 x i32>
@@ -101,7 +101,7 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_cmp_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -119,7 +119,7 @@ declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) no
define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_cmp_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -135,7 +135,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0
define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_cmp_ps_256_pseudo_op(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -388,7 +388,7 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>) nounwind readnone
define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_dp_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> <i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> [[TMP3]], <8 x i32> zeroinitializer
@@ -414,7 +414,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi
define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_hadd_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -432,7 +432,7 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_hadd_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -450,7 +450,7 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_hsub_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -468,7 +468,7 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_hsub_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -509,7 +509,7 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr) nounwind readonly
define <2 x double> @test_x86_avx_maskload_pd(ptr %a0, <2 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_pd(
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP7]], 87960930222080
@@ -535,7 +535,7 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(ptr, <2 x i64>) nounwind readonly
define <4 x double> @test_x86_avx_maskload_pd_256(ptr %a0, <4 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_pd_256(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP7]], 87960930222080
@@ -561,7 +561,7 @@ declare <4 x double> @llvm.x86.avx.maskload.pd.256(ptr, <4 x i64>) nounwind read
define <4 x float> @test_x86_avx_maskload_ps(ptr %a0, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_ps(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP7]], 87960930222080
@@ -587,7 +587,7 @@ declare <4 x float> @llvm.x86.avx.maskload.ps(ptr, <4 x i32>) nounwind readonly
define <8 x float> @test_x86_avx_maskload_ps_256(ptr %a0, <8 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_ps_256(
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP7]], 87960930222080
@@ -613,9 +613,9 @@ declare <8 x float> @llvm.x86.avx.maskload.ps.256(ptr, <8 x i32>) nounwind reado
define void @test_x86_avx_maskstore_pd(ptr %a0, <2 x i64> %mask, <2 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_maskstore_pd(
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -642,9 +642,9 @@ declare void @llvm.x86.avx.maskstore.pd(ptr, <2 x i64>, <2 x double>) nounwind
define void @test_x86_avx_maskstore_pd_256(ptr %a0, <4 x i64> %mask, <4 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_maskstore_pd_256(
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -671,9 +671,9 @@ declare void @llvm.x86.avx.maskstore.pd.256(ptr, <4 x i64>, <4 x double>) nounwi
define void @test_x86_avx_maskstore_ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_maskstore_ps(
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -700,9 +700,9 @@ declare void @llvm.x86.avx.maskstore.ps(ptr, <4 x i32>, <4 x float>) nounwind
define void @test_x86_avx_maskstore_ps_256(ptr %a0, <8 x i32> %mask, <8 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_maskstore_ps_256(
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -730,7 +730,7 @@ declare void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32>, <8 x float>) nounwin
define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_max_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
@@ -746,7 +746,7 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_max_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
@@ -762,7 +762,7 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_min_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
@@ -778,7 +778,7 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_min_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
@@ -836,7 +836,7 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestc_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -855,7 +855,7 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestnzc_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -874,7 +874,7 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestz_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -948,7 +948,7 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[A1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[A1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = trunc <2 x i64> [[A1]] to <2 x i1>
; CHECK-NEXT: [[A0:%.*]] = bitcast <2 x i64> [[TMP1]] to <2 x double>
@@ -974,7 +974,7 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi
define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[A1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[A1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[A1]] to <4 x i2>
; CHECK-NEXT: [[A0:%.*]] = bitcast <4 x i64> [[TMP1]] to <4 x double>
@@ -1014,7 +1014,7 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 {
define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[A1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[A1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i32> [[A1]] to <4 x i2>
; CHECK-NEXT: [[A0:%.*]] = bitcast <4 x i32> [[TMP1]] to <4 x float>
@@ -1036,7 +1036,7 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #
}
define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -1075,7 +1075,7 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind
define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[A1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[A1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = trunc <8 x i32> [[A1]] to <8 x i3>
; CHECK-NEXT: [[A0:%.*]] = bitcast <8 x i32> [[TMP1]] to <8 x float>
@@ -1101,7 +1101,7 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun
define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -1120,7 +1120,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -1139,7 +1139,7 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
@@ -1158,7 +1158,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -1177,7 +1177,7 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn
define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -1196,7 +1196,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -1215,7 +1215,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind r
define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
@@ -1234,7 +1234,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon
define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -1253,7 +1253,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind rea
define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -1272,7 +1272,7 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -1291,7 +1291,7 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
@@ -1310,7 +1310,7 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -1351,7 +1351,7 @@ declare void @llvm.x86.avx.vzeroupper() nounwind
define void @movnt_dq(ptr %p, <2 x i64> %a1) nounwind #0 {
; CHECK-LABEL: @movnt_dq(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
@@ -1381,7 +1381,7 @@ declare void @llvm.x86.avx.movnt.dq.256(ptr, <4 x i64>) nounwind
define void @movnt_ps(ptr %p, <8 x float> %a) nounwind #0 {
; CHECK-LABEL: @movnt_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1404,7 +1404,7 @@ declare void @llvm.x86.avx.movnt.ps.256(ptr, <8 x float>) nounwind
define void @movnt_pd(ptr %p, <4 x double> %a1) nounwind #0 {
; add operation forces the execution domain.
; CHECK-LABEL: @movnt_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
@@ -1432,7 +1432,7 @@ declare void @llvm.x86.avx.movnt.pd.256(ptr, <4 x double>) nounwind
define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_pclmulqdq(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <2 x i32> zeroinitializer
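; ---------------------------------------------------------------------------
; Editor's note (illustrative sketch, not part of the patch): every hunk in
; this file swaps the same two spellings of one address. Both constant
; expressions below name the byte at offset 32 from @__msan_param_tls; the
; i8-typed getelementptr is simply the canonical byte-offset form, so the
; rewritten shadow loads are unchanged in meaning. The function name
; @sketch_equiv and the [100 x i64] array size are assumptions made only to
; keep the module self-contained; they are not taken from these tests.
@__msan_param_tls = external thread_local global [100 x i64]

define <8 x i32> @sketch_equiv() {
  ; old spelling: round-trip the base pointer through i64 arithmetic
  %old = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
  ; new spelling: byte-wise GEP on an i8 base, resolving to the same address
  %new = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
  %eq = or <8 x i32> %old, %new
  ret <8 x i32> %eq
}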
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll
index 991467e..8900085 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll
@@ -16,8 +16,8 @@ define <16 x float> @test_mm512_dpph_ps(<16 x float> %__W, <32 x half> %__A, <32
; CHECK-LABEL: define <16 x float> @test_mm512_dpph_ps(
; CHECK-SAME: <16 x float> [[__W:%.*]], <32 x half> [[__A:%.*]], <32 x half> [[__B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -44,9 +44,9 @@ define <16 x float> @test_mm512_mask_dpph_ps(<16 x float> %__W, i16 zeroext %__U
; CHECK-LABEL: define <16 x float> @test_mm512_mask_dpph_ps(
; CHECK-SAME: <16 x float> [[__W:%.*]], i16 zeroext [[__U:%.*]], <32 x half> [[__A:%.*]], <32 x half> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -84,9 +84,9 @@ define <16 x float> @test_mm512_mask_dpph_ps(<16 x float> %__W, i16 zeroext %__U
define <16 x float> @test_mm512_maskz_dpph_ps(i16 zeroext %__U, <16 x float> %__W, <32 x half> %__A, <32 x half> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x float> @test_mm512_maskz_dpph_ps(
; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x float> [[__W:%.*]], <32 x half> [[__A:%.*]], <32 x half> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
@@ -127,8 +127,8 @@ declare <16 x float> @llvm.x86.avx10.vdpphps.512(<16 x float>, <32 x half>, <32
define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpbssd_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -168,10 +168,10 @@ define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr
define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbssds_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP25:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[TMP25]], zeroinitializer
@@ -208,9 +208,9 @@ define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %_
define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbssd_epi32(
; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP25:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP26:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP26:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[TMP25]], zeroinitializer
@@ -251,8 +251,8 @@ declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <64 x i8>, <64 x i8
define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpbsud_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -292,10 +292,10 @@ define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr
define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbsuds_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer
@@ -332,9 +332,9 @@ define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %_
define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbsud_epi32(
; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP19:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP19:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
@@ -375,8 +375,8 @@ declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <64 x i8>, <64 x i8
define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpbuud_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -416,10 +416,10 @@ define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr
define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbuuds_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer
@@ -456,9 +456,9 @@ define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %_
define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbuud_epi32(
; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP19:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP19:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
@@ -500,9 +500,9 @@ declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <64 x i8>, <64 x i8
define <16 x i32> @test_mm512_dpwsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpwsud_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -530,9 +530,9 @@ define <16 x i32> @test_mm512_mask_dpwsuds_epi32(<16 x i32> %__W, i16 zeroext %_
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpwsuds_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]]
@@ -557,9 +557,9 @@ define <16 x i32> @test_mm512_mask_dpwsuds_epi32(<16 x i32> %__W, i16 zeroext %_
define <16 x i32> @test_mm512_maskz_dpwsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpwsud_epi32(
; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
@@ -588,9 +588,9 @@ declare <16 x i32> @llvm.x86.avx10.vpdpwsuds.512(<16 x i32>, <16 x i32>, <16 x i
define <16 x i32> @test_mm512_dpwusd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpwusd_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -618,9 +618,9 @@ define <16 x i32> @test_mm512_mask_dpwusds_epi32(<16 x i32> %__W, i16 zeroext %_
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpwusds_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]]
@@ -645,9 +645,9 @@ define <16 x i32> @test_mm512_mask_dpwusds_epi32(<16 x i32> %__W, i16 zeroext %_
define <16 x i32> @test_mm512_maskz_dpwusd_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpwusd_epi32(
; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
@@ -676,9 +676,9 @@ declare <16 x i32> @llvm.x86.avx10.vpdpwusds.512(<16 x i32>, <16 x i32>, <16 x i
define <16 x i32> @test_mm512_dpwuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpwuud_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -706,9 +706,9 @@ define <16 x i32> @test_mm512_mask_dpwuuds_epi32(<16 x i32> %__W, i16 zeroext %_
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpwuuds_epi32(
; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]]
@@ -733,9 +733,9 @@ define <16 x i32> @test_mm512_mask_dpwuuds_epi32(<16 x i32> %__W, i16 zeroext %_
define <16 x i32> @test_mm512_maskz_dpwuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpwuud_epi32(
; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
@@ -765,10 +765,10 @@ declare <16 x i32> @llvm.x86.avx10.vpdpwuuds.512(<16 x i32>, <16 x i32>, <16 x i
define { <32 x i16>, <32 x i16>, <32 x i16> } @test_mm512_mask_mpsadbw(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) sanitize_memory {
; CHECK-LABEL: define { <32 x i16>, <32 x i16>, <32 x i16> } @test_mm512_mask_mpsadbw(
; CHECK-SAME: <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <32 x i16> [[X3:%.*]], i32 [[X4:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i32 [[X4]] to <32 x i1>
@@ -844,7 +844,7 @@ define <8 x float> @avx_dp_ps(<8 x float> %a, <8 x float> %b) sanitize_memory {
; CHECK-LABEL: define <8 x float> @avx_dp_ps(
; CHECK-SAME: <8 x float> [[A:%.*]], <8 x float> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> [[TMP3]], <8 x i32> zeroinitializer
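
Every hunk in this batch of MemorySanitizer test updates applies the same substitution: the constant address of an argument's shadow slot in @__msan_param_tls, formerly written as an integer round-trip (ptrtoint, add, inttoptr), is now written as a constant byte-offset getelementptr. A minimal sketch of the two spellings, using the 8-byte offset that recurs throughout (both constant expressions denote @__msan_param_tls plus 8 bytes; %s is a placeholder name, not from the tests):

; old spelling: pointer -> integer -> add -> pointer round-trip
%s = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; new spelling: the same address as a byte GEP over i8
%s = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8

Only the spelling of the address constant changes, which is why each hunk touches the shadow-load lines and leaves the surrounding instrumentation checks untouched.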
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll
index 373eff6..def7ba3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll
@@ -19,8 +19,8 @@ define <4 x float> @test_mm_dpph_ps(<4 x float> %__W, <8 x half> %__A, <8 x half
; CHECK-LABEL: define <4 x float> @test_mm_dpph_ps(
; CHECK-SAME: <4 x float> [[__W:%.*]], <8 x half> [[__A:%.*]], <8 x half> [[__B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -47,9 +47,9 @@ define <4 x float> @test_mm_mask_dpph_ps(<4 x float> %__W, i8 zeroext %__U, <8 x
; CHECK-LABEL: define <4 x float> @test_mm_mask_dpph_ps(
; CHECK-SAME: <4 x float> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x half> [[__A:%.*]], <8 x half> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -90,9 +90,9 @@ define <4 x float> @test_mm_mask_dpph_ps(<4 x float> %__W, i8 zeroext %__U, <8 x
define <4 x float> @test_mm_maskz_dpph_ps(i8 zeroext %__U, <4 x float> %__W, <8 x half> %__A, <8 x half> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x float> @test_mm_maskz_dpph_ps(
; CHECK-SAME: i8 zeroext [[__U:%.*]], <4 x float> [[__W:%.*]], <8 x half> [[__A:%.*]], <8 x half> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -134,8 +134,8 @@ define <8 x float> @test_mm256_dpph_ps(<8 x float> %__W, <16 x half> %__A, <16 x
; CHECK-LABEL: define <8 x float> @test_mm256_dpph_ps(
; CHECK-SAME: <8 x float> [[__W:%.*]], <16 x half> [[__A:%.*]], <16 x half> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -162,9 +162,9 @@ define <8 x float> @test_mm256_mask_dpph_ps(<8 x float> %__W, i8 zeroext %__U, <
; CHECK-LABEL: define <8 x float> @test_mm256_mask_dpph_ps(
; CHECK-SAME: <8 x float> [[__W:%.*]], i8 zeroext [[__U:%.*]], <16 x half> [[__A:%.*]], <16 x half> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -202,9 +202,9 @@ define <8 x float> @test_mm256_mask_dpph_ps(<8 x float> %__W, i8 zeroext %__U, <
define <8 x float> @test_mm256_maskz_dpph_ps(i8 zeroext %__U, <8 x float> %__W, <16 x half> %__A, <16 x half> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x float> @test_mm256_maskz_dpph_ps(
; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x float> [[__W:%.*]], <16 x half> [[__A:%.*]], <16 x half> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
@@ -246,10 +246,10 @@ declare <8 x float> @llvm.x86.avx10.vdpphps.256(<8 x float>, <16 x half>, <16 x
define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbssd_epi32(
; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP25:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <16 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[TMP25]], zeroinitializer
@@ -286,9 +286,9 @@ define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <16
define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbssds_epi32(
; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP25:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP26:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP24:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP26:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <16 x i8> [[TMP25]], zeroinitializer
@@ -326,10 +326,10 @@ define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpbssds_epi32(
; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP25:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <32 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[TMP25]], zeroinitializer
@@ -366,9 +366,9 @@ define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U
define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbssd_epi32(
; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP25:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP26:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP24:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP26:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <32 x i8> [[TMP25]], zeroinitializer
@@ -411,10 +411,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbsud_epi32(
; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer
@@ -451,9 +451,9 @@ define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16
define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbsuds_epi32(
; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
@@ -491,10 +491,10 @@ define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(
; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer
@@ -531,9 +531,9 @@ define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U
define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbsud_epi32(
; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
@@ -576,10 +576,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbuud_epi32(
; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer
@@ -616,9 +616,9 @@ define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16
define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbuuds_epi32(
; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
@@ -656,10 +656,10 @@ define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(
; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer
@@ -696,9 +696,9 @@ define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U
define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbuud_epi32(
; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
@@ -743,9 +743,9 @@ define <4 x i32> @test_mm_mask_dpwsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpwsud_epi32(
; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -770,9 +770,9 @@ define <4 x i32> @test_mm_mask_dpwsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
define <4 x i32> @test_mm_maskz_dpwsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpwsuds_epi32(
; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -799,9 +799,9 @@ define <8 x i32> @test_mm256_maskz_dpwsuds_epi32(<8 x i32> %__W, i8 zeroext %__U
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpwsuds_epi32(
; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -826,9 +826,9 @@ define <8 x i32> @test_mm256_maskz_dpwsuds_epi32(<8 x i32> %__W, i8 zeroext %__U
define <8 x i32> @test_mm256_mask_dpwsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpwsud_epi32(
; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -860,9 +860,9 @@ define <4 x i32> @test_mm_mask_dpwusd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpwusd_epi32(
; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -887,9 +887,9 @@ define <4 x i32> @test_mm_mask_dpwusd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
define <4 x i32> @test_mm_maskz_dpwusds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpwusds_epi32(
; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -916,9 +916,9 @@ define <8 x i32> @test_mm256_maskz_dpwusds_epi32(<8 x i32> %__W, i8 zeroext %__U
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpwusds_epi32(
; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -943,9 +943,9 @@ define <8 x i32> @test_mm256_maskz_dpwusds_epi32(<8 x i32> %__W, i8 zeroext %__U
define <8 x i32> @test_mm256_mask_dpwusd_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpwusd_epi32(
; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -977,9 +977,9 @@ define <4 x i32> @test_mm_mask_dpwuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpwuud_epi32(
; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -1004,9 +1004,9 @@ define <4 x i32> @test_mm_mask_dpwuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
define <4 x i32> @test_mm_maskz_dpwuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpwuuds_epi32(
; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -1033,9 +1033,9 @@ define <8 x i32> @test_mm256_maskz_dpwuuds_epi32(<8 x i32> %__W, i8 zeroext %__U
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpwuuds_epi32(
; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -1060,9 +1060,9 @@ define <8 x i32> @test_mm256_maskz_dpwuuds_epi32(<8 x i32> %__W, i8 zeroext %__U
define <8 x i32> @test_mm256_mask_dpwuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpwuud_epi32(
; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -1094,10 +1094,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpwuuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
define { <8 x i16>, <8 x i16>, <8 x i16> } @test_mask_mpsadbw_128(<16 x i8> %x0, <16 x i8> %x1, <8 x i16> %x3, i8 %x4) sanitize_memory {
; CHECK-LABEL: define { <8 x i16>, <8 x i16>, <8 x i16> } @test_mask_mpsadbw_128(
; CHECK-SAME: <16 x i8> [[X0:%.*]], <16 x i8> [[X1:%.*]], <8 x i16> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[X4]] to <8 x i1>
@@ -1169,10 +1169,10 @@ define { <8 x i16>, <8 x i16>, <8 x i16> } @test_mask_mpsadbw_128(<16 x i8> %x0,
define { <16 x i16>, <16 x i16>, <16 x i16> } @test_mask_mpsadbw_256(<32 x i8> %x0, <32 x i8> %x1, <16 x i16> %x3, i16 %x4) sanitize_memory {
; CHECK-LABEL: define { <16 x i16>, <16 x i16>, <16 x i16> } @test_mask_mpsadbw_256(
; CHECK-SAME: <32 x i8> [[X0:%.*]], <32 x i8> [[X1:%.*]], <16 x i16> [[X3:%.*]], i16 [[X4:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[X4]] to <16 x i1>
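
The byte offsets in these rewritten loads follow the parameter-shadow layout that the checks already encode: each argument's shadow occupies the next 8-byte-aligned slot of @__msan_param_tls. Reading it off test_mask_mpsadbw_256 above (a sketch inferred from the CHECK lines, not stated anywhere in the diff):

;   offset  0: shadow of <32 x i8>  %x0  (32 bytes)
;   offset 32: shadow of <32 x i8>  %x1  (32 bytes)
;   offset 64: shadow of <16 x i16> %x3  (32 bytes)
;   offset 96: shadow of i16        %x4

The getelementptr rewrite changes how @__msan_param_tls plus 96 (and so on) is spelled, not which slots are read.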
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll
index 29269ff..e447cab 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packssdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -40,7 +40,7 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() #0 {
define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packsswb(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i16>
@@ -73,7 +73,7 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() #0 {
define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packuswb(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i16>
@@ -106,7 +106,7 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() #0 {
define <32 x i8> @test_x86_avx2_pavg_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pavg_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
@@ -122,7 +122,7 @@ declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_pavg_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pavg_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -138,7 +138,7 @@ declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readno
define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmadd_wd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i16> [[TMP2]], zeroinitializer
@@ -187,7 +187,7 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmulh_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -203,7 +203,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmulhu_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -219,7 +219,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind read
define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psad_bw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i8> [[TMP3]] to <4 x i64>
@@ -239,7 +239,7 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -261,7 +261,7 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -283,7 +283,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -353,7 +353,7 @@ declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone
define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -375,7 +375,7 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psra_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -429,7 +429,7 @@ declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone
define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -451,7 +451,7 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -473,7 +473,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -494,7 +494,7 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnon
define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, ptr %p) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_w_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -576,7 +576,7 @@ declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone
define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -594,7 +594,7 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -612,7 +612,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -630,7 +630,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -648,7 +648,7 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -666,7 +666,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -684,7 +684,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i8> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer
@@ -711,7 +711,7 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind rea
define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -749,7 +749,7 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) #
define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmul_hr_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -765,7 +765,7 @@ declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind re
define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pshuf_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> [[TMP1]], <32 x i8> [[A1:%.*]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP2]], [[TMP3]]
@@ -782,7 +782,7 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
@@ -798,7 +798,7 @@ declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
@@ -814,7 +814,7 @@ declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -830,7 +830,7 @@ declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_mpsadbw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <32 x i8> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -854,7 +854,7 @@ declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind rea
define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_mpsadbw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -889,7 +889,7 @@ define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packusdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -921,8 +921,8 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() #0 {
define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendvb(
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ashr <32 x i8> [[A2:%.*]], splat (i8 7)
@@ -947,7 +947,7 @@ declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounw
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]], <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -963,7 +963,7 @@ declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind r
define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendd_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> <i32 4, i32 5, i32 6, i32 3>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]], <4 x i32> <i32 4, i32 5, i32 6, i32 3>
@@ -979,7 +979,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind
define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]], <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -995,7 +995,7 @@ declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind
define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_permd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
@@ -1011,7 +1011,7 @@ declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_permps(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1035,7 +1035,7 @@ declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind reado
define <2 x i64> @test_x86_avx2_maskload_q(ptr %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_q(
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP6]], 87960930222080
@@ -1060,7 +1060,7 @@ declare <2 x i64> @llvm.x86.avx2.maskload.q(ptr, <2 x i64>) nounwind readonly
define <4 x i64> @test_x86_avx2_maskload_q_256(ptr %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_q_256(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP6]], 87960930222080
@@ -1085,7 +1085,7 @@ declare <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr, <4 x i64>) nounwind readonl
define <4 x i32> @test_x86_avx2_maskload_d(ptr %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_d(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP6]], 87960930222080
@@ -1110,7 +1110,7 @@ declare <4 x i32> @llvm.x86.avx2.maskload.d(ptr, <4 x i32>) nounwind readonly
define <8 x i32> @test_x86_avx2_maskload_d_256(ptr %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_d_256(
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP6]], 87960930222080
@@ -1135,9 +1135,9 @@ declare <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr, <8 x i32>) nounwind readonl
define void @test_x86_avx2_maskstore_q(ptr %a0, <2 x i64> %a1, <2 x i64> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_maskstore_q(
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -1163,9 +1163,9 @@ declare void @llvm.x86.avx2.maskstore.q(ptr, <2 x i64>, <2 x i64>) nounwind
define void @test_x86_avx2_maskstore_q_256(ptr %a0, <4 x i64> %a1, <4 x i64> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_maskstore_q_256(
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -1191,9 +1191,9 @@ declare void @llvm.x86.avx2.maskstore.q.256(ptr, <4 x i64>, <4 x i64>) nounwind
define void @test_x86_avx2_maskstore_d(ptr %a0, <4 x i32> %a1, <4 x i32> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_maskstore_d(
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -1219,9 +1219,9 @@ declare void @llvm.x86.avx2.maskstore.d(ptr, <4 x i32>, <4 x i32>) nounwind
define void @test_x86_avx2_maskstore_d_256(ptr %a0, <8 x i32> %a1, <8 x i32> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_maskstore_d_256(
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -1248,7 +1248,7 @@ declare void @llvm.x86.avx2.maskstore.d.256(ptr, <8 x i32>, <8 x i32>) nounwind
define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -1287,7 +1287,7 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -1326,7 +1326,7 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
@@ -1357,7 +1357,7 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64>
@@ -1389,7 +1389,7 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -1428,7 +1428,7 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -1467,7 +1467,7 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
@@ -1499,7 +1499,7 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64>
@@ -1532,7 +1532,7 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrav_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -1563,7 +1563,7 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrav_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -1594,9 +1594,9 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, ptr %a1, <4 x i32> %idx, <2 x double> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1627,9 +1627,9 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, ptr,
define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, ptr %a1, <4 x i32> %idx, <4 x double> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1660,9 +1660,9 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, ptr,
define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, ptr %a1, <2 x i64> %idx, <2 x double> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1693,9 +1693,9 @@ declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, ptr,
define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, ptr %a1, <4 x i64> %idx, <4 x double> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1726,9 +1726,9 @@ declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, ptr,
define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, ptr %a1, <4 x i32> %idx, <4 x float> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1759,9 +1759,9 @@ declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, ptr,
define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, ptr %a1, <8 x i32> %idx, <8 x float> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1792,9 +1792,9 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr,
define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, ptr %a1, <2 x i64> %idx, <4 x float> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1825,9 +1825,9 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, ptr,
define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, ptr %a1, <4 x i64> %idx, <4 x float> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1858,9 +1858,9 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, ptr,
define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, ptr %a1, <4 x i32> %idx, <2 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1891,9 +1891,9 @@ declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, ptr,
define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, ptr %a1, <4 x i32> %idx, <4 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1924,9 +1924,9 @@ declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, ptr,
define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1957,9 +1957,9 @@ declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, ptr,
define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, ptr %a1, <4 x i64> %idx, <4 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1990,9 +1990,9 @@ declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, ptr,
define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, ptr %a1, <4 x i32> %idx, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -2023,9 +2023,9 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr,
define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, ptr %a1, <8 x i32> %idx, <8 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -2056,9 +2056,9 @@ declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, ptr,
define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, ptr %a1, <2 x i64> %idx, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -2089,9 +2089,9 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, ptr,
define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, ptr %a1, <4 x i64> %idx, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -2122,10 +2122,10 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, ptr,
define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <8 x float> %mask, ptr nocapture %out) #0 {
; CHECK-LABEL: @test_gather_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP6]], 0
@@ -2167,10 +2167,10 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <
define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i1> %mask) #0 {
; CHECK-LABEL: @test_mask_demanded_bits(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT: [[MASK1:%.*]] = sext <2 x i1> [[MASK:%.*]] to <2 x i64>
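
; Illustrative aside, not part of the patch: every hunk in the file above
; rewrites the constant address of a parameter's shadow slot from an integer
; round-trip (ptrtoint -> add -> inttoptr) to a direct byte-offset
; getelementptr. Both constant expressions denote the same location in the
; MSan parameter-shadow TLS block. A minimal standalone sketch follows; the
; [100 x i64] declaration mirrors the usual 800-byte __msan_param_tls array
; and is an assumption here, as is the 16-byte offset of the second argument.
;
@__msan_param_tls = external thread_local(initialexec) global [100 x i64]

define <4 x i32> @arg1_shadow_both_forms() {
  ; Old form: pointer converted to an integer, offset added, converted back.
  %old = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
  ; New form: the same 16-byte offset expressed as pointer arithmetic,
  ; which keeps the provenance visible to the optimizer.
  %new = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
  ; Both loads read the same shadow slot, so this sum is just 2x either load.
  %sum = add <4 x i32> %old, %new
  ret <4 x i32> %sum
}
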
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll
index 43da02d..17bef29 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll
@@ -19,10 +19,10 @@ target triple = "x86_64-unknown-linux-gnu"
declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8affineinvqb_128(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -94,10 +94,10 @@ define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineinvqb_128(<16 x i8>
declare <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i8)
define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8affineinvqb_256(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
@@ -169,10 +169,10 @@ define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineinvqb_256(<32 x i8>
declare <64 x i8> @llvm.x86.vgf2p8affineinvqb.512(<64 x i8>, <64 x i8>, i8)
define { <64 x i8>, <64 x i8>, <64 x i8> } @test_vgf2p8affineinvqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8affineinvqb_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
@@ -244,10 +244,10 @@ define { <64 x i8>, <64 x i8>, <64 x i8> } @test_vgf2p8affineinvqb_512(<64 x i8>
declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8affineqb_128(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -308,10 +308,10 @@ define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineqb_128(<16 x i8> %s
declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8affineqb_256(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
@@ -372,10 +372,10 @@ define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineqb_256(<32 x i8> %s
declare <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8>, <64 x i8>, i8)
define { <64 x i8>, <64 x i8>, <64 x i8> } @test_vgf2p8affineqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8affineqb_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
@@ -437,7 +437,7 @@ declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> [[SRC1:%.*]], <16 x i8> [[SRC2:%.*]])
@@ -450,10 +450,10 @@ define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) #0 {
define <16 x i8> @test_vgf2p8mulb_128_mask(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -476,9 +476,9 @@ define <16 x i8> @test_vgf2p8mulb_128_mask(<16 x i8> %src1, <16 x i8> %src2, <16
define <16 x i8> @test_vgf2p8mulb_128_maskz(<16 x i8> %src1, <16 x i8> %src2, i16 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_128_maskz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -503,7 +503,7 @@ declare <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8>, <32 x i8>)
define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8> [[SRC1:%.*]], <32 x i8> [[SRC2:%.*]])
@@ -516,10 +516,10 @@ define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2) #0 {
define <32 x i8> @test_vgf2p8mulb_256_mask(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
@@ -542,9 +542,9 @@ define <32 x i8> @test_vgf2p8mulb_256_mask(<32 x i8> %src1, <32 x i8> %src2, <32
define <32 x i8> @test_vgf2p8mulb_256_maskz(<32 x i8> %src1, <32 x i8> %src2, i32 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_256_maskz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
@@ -569,7 +569,7 @@ declare <64 x i8> @llvm.x86.vgf2p8mulb.512(<64 x i8>, <64 x i8>)
define <64 x i8> @test_vgf2p8mulb_512(<64 x i8> %src1, <64 x i8> %src2) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.x86.vgf2p8mulb.512(<64 x i8> [[SRC1:%.*]], <64 x i8> [[SRC2:%.*]])
@@ -582,10 +582,10 @@ define <64 x i8> @test_vgf2p8mulb_512(<64 x i8> %src1, <64 x i8> %src2) #0 {
define <64 x i8> @test_vgf2p8mulb_512_mask(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
@@ -608,9 +608,9 @@ define <64 x i8> @test_vgf2p8mulb_512_mask(<64 x i8> %src1, <64 x i8> %src2, <64
define <64 x i8> @test_vgf2p8mulb_512_maskz(<64 x i8> %src1, <64 x i8> %src2, i64 %mask) #0 {
; CHECK-LABEL: @test_vgf2p8mulb_512_maskz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i64 [[TMP1]] to <64 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll
index 74cb49b..25a4a9a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll
@@ -12,7 +12,7 @@ define i16 @unpckbw_test(i16 %a0, i16 %a1) #0 {
;
; CHECK-LABEL: @unpckbw_test(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1>
@@ -37,8 +37,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_pbroadca
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcastd_gpr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i32> splat (i32 -1), i32 [[TMP1]], i64 0
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[X0:%.*]], i64 0
@@ -92,8 +92,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_pbroadcastq
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcastq_gpr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <8 x i64> splat (i64 -1), i64 [[TMP1]], i64 0
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[X0:%.*]], i64 0
@@ -162,8 +162,8 @@ define <16 x float> @test_x86_mask_vbroadcast_ss_ps_512(<4 x float> %a0, <16 x f
;
; CHECK-LABEL: @test_x86_mask_vbroadcast_ss_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[A0:%.*]], <4 x float> poison, <16 x i32> zeroinitializer
@@ -188,7 +188,7 @@ define <16 x float> @test_x86_maskz_vbroadcast_ss_ps_512(<4 x float> %a0, i16 %m
;
; CHECK-LABEL: @test_x86_maskz_vbroadcast_ss_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A0:%.*]], <4 x float> poison, <16 x i32> zeroinitializer
@@ -227,8 +227,8 @@ define <8 x double> @test_x86_mask_vbroadcast_sd_pd_512(<2 x double> %a0, <8 x d
;
; CHECK-LABEL: @test_x86_mask_vbroadcast_sd_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[A0:%.*]], <2 x double> poison, <8 x i32> zeroinitializer
@@ -253,7 +253,7 @@ define <8 x double> @test_x86_maskz_vbroadcast_sd_pd_512(<2 x double> %a0, i8 %m
;
; CHECK-LABEL: @test_x86_maskz_vbroadcast_sd_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[A0:%.*]], <2 x double> poison, <8 x i32> zeroinitializer
@@ -292,8 +292,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pbroadcastd_512(<4 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcastd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[X0:%.*]], <4 x i32> poison, <16 x i32> zeroinitializer
@@ -316,7 +316,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pbroadcastd_512(<4 x i32> %x0, i16 %
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pbroadcastd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[X0:%.*]], <4 x i32> poison, <16 x i32> zeroinitializer
@@ -354,8 +354,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pbroadcastq_512(<2 x i64> %x0, <8 x i6
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcastq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[X0:%.*]], <2 x i64> poison, <8 x i32> zeroinitializer
@@ -378,7 +378,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pbroadcastq_512(<2 x i64> %x0, i8 %ma
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pbroadcastq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[X0:%.*]], <2 x i64> poison, <8 x i32> zeroinitializer
@@ -416,8 +416,8 @@ define <16 x float>@test_int_x86_avx512_mask_movsldup_512(<16 x float> %x0, <16
;
; CHECK-LABEL: @test_int_x86_avx512_mask_movsldup_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
@@ -442,7 +442,7 @@ define <16 x float>@test_int_x86_avx512_maskz_movsldup_512(<16 x float> %x0, i16
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_movsldup_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
@@ -481,8 +481,8 @@ define <16 x float>@test_int_x86_avx512_mask_movshdup_512(<16 x float> %x0, <16
;
; CHECK-LABEL: @test_int_x86_avx512_mask_movshdup_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
@@ -507,7 +507,7 @@ define <16 x float>@test_int_x86_avx512_maskz_movshdup_512(<16 x float> %x0, i16
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_movshdup_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
@@ -546,8 +546,8 @@ define <8 x double>@test_int_x86_avx512_mask_movddup_512(<8 x double> %x0, <8 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_movddup_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -572,7 +572,7 @@ define <8 x double>@test_int_x86_avx512_maskz_movddup_512(<8 x double> %x0, i8 %
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_movddup_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -611,8 +611,8 @@ define <8 x double>@test_int_x86_avx512_mask_perm_df_512(<8 x double> %x0, <8 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_perm_df_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4>
@@ -637,7 +637,7 @@ define <8 x double>@test_int_x86_avx512_maskz_perm_df_512(<8 x double> %x0, i8 %
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_perm_df_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4>
@@ -676,8 +676,8 @@ define <8 x i64>@test_int_x86_avx512_mask_perm_di_512(<8 x i64> %x0, i32 %x1, <8
;
; CHECK-LABEL: @test_int_x86_avx512_mask_perm_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X0]], <8 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4>
@@ -700,7 +700,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_perm_di_512(<8 x i64> %x0, i32 %x1, i
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_perm_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X0]], <8 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4>
@@ -722,10 +722,10 @@ define <8 x i64>@test_int_x86_avx512_maskz_perm_di_512(<8 x i64> %x0, i32 %x1, i
define void @test_store1(<16 x float> %data, ptr %ptr, ptr %ptr2, i16 %mask) #0 {
;
; CHECK-LABEL: @test_store1(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -766,10 +766,10 @@ declare void @llvm.x86.avx512.mask.storeu.ps.512(ptr, <16 x float>, i16 )
define void @test_store2(<8 x double> %data, ptr %ptr, ptr %ptr2, i8 %mask) #0 {
;
; CHECK-LABEL: @test_store2(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -810,10 +810,10 @@ declare void @llvm.x86.avx512.mask.storeu.pd.512(ptr, <8 x double>, i8)
define void @test_mask_store_aligned_ps(<16 x float> %data, ptr %ptr, ptr %ptr2, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_store_aligned_ps(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -854,10 +854,10 @@ declare void @llvm.x86.avx512.mask.store.ps.512(ptr, <16 x float>, i16 )
define void @test_mask_store_aligned_pd(<8 x double> %data, ptr %ptr, ptr %ptr2, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_store_aligned_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -898,10 +898,10 @@ declare void @llvm.x86.avx512.mask.store.pd.512(ptr, <8 x double>, i8)
define void@test_int_x86_avx512_mask_storeu_q_512(ptr %ptr1, ptr %ptr2, <8 x i64> %x1, i8 %x2) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_storeu_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[X2:%.*]] to <8 x i1>
@@ -942,10 +942,10 @@ declare void @llvm.x86.avx512.mask.storeu.q.512(ptr, <8 x i64>, i8)
define void@test_int_x86_avx512_mask_storeu_d_512(ptr %ptr1, ptr %ptr2, <16 x i32> %x1, i16 %x2) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_storeu_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[X2:%.*]] to <16 x i1>
@@ -986,10 +986,10 @@ declare void @llvm.x86.avx512.mask.storeu.d.512(ptr, <16 x i32>, i16)
define void@test_int_x86_avx512_mask_store_q_512(ptr %ptr1, ptr %ptr2, <8 x i64> %x1, i8 %x2) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_store_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[X2:%.*]] to <8 x i1>
@@ -1030,10 +1030,10 @@ declare void @llvm.x86.avx512.mask.store.q.512(ptr, <8 x i64>, i8)
define void@test_int_x86_avx512_mask_store_d_512(ptr %ptr1, ptr %ptr2, <16 x i32> %x1, i16 %x2) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_store_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[X2:%.*]] to <16 x i1>
@@ -1074,8 +1074,8 @@ declare void @llvm.x86.avx512.mask.store.d.512(ptr, <16 x i32>, i16)
define <16 x float> @test_mask_load_aligned_ps(<16 x float> %data, ptr %ptr, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_load_aligned_ps(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1137,8 +1137,8 @@ declare <16 x float> @llvm.x86.avx512.mask.load.ps.512(ptr, <16 x float>, i16)
define <16 x float> @test_mask_load_unaligned_ps(<16 x float> %data, ptr %ptr, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_load_unaligned_ps(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1200,8 +1200,8 @@ declare <16 x float> @llvm.x86.avx512.mask.loadu.ps.512(ptr, <16 x float>, i16)
define <8 x double> @test_mask_load_aligned_pd(<8 x double> %data, ptr %ptr, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_load_aligned_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1263,8 +1263,8 @@ declare <8 x double> @llvm.x86.avx512.mask.load.pd.512(ptr, <8 x double>, i8)
define <8 x double> @test_mask_load_unaligned_pd(<8 x double> %data, ptr %ptr, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_load_unaligned_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1329,8 +1329,8 @@ define <16 x i32> @test_mask_load_unaligned_d(ptr %ptr, ptr %ptr2, <16 x i32> %d
;
; CHECK-LABEL: @test_mask_load_unaligned_d(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -1393,8 +1393,8 @@ define <8 x i64> @test_mask_load_unaligned_q(ptr %ptr, ptr %ptr2, <8 x i64> %dat
;
; CHECK-LABEL: @test_mask_load_unaligned_q(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -1456,8 +1456,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.load.d.512(ptr, <16 x i32>, i16)
define <16 x i32> @test_mask_load_aligned_d(<16 x i32> %data, ptr %ptr, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_load_aligned_d(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1519,8 +1519,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.load.q.512(ptr, <8 x i64>, i8)
define <8 x i64> @test_mask_load_aligned_q(<8 x i64> %data, ptr %ptr, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_load_aligned_q(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -1596,8 +1596,8 @@ define <8 x double>@test_int_x86_avx512_mask_vpermil_pd_512(<8 x double> %x0, <8
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermil_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 6, i32 6>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 6, i32 6>
@@ -1622,7 +1622,7 @@ define <8 x double>@test_int_x86_avx512_maskz_vpermil_pd_512(<8 x double> %x0, i
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermil_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 6, i32 6>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 5, i32 4, i32 6, i32 6>
@@ -1661,8 +1661,8 @@ define <16 x float>@test_int_x86_avx512_mask_vpermil_ps_512(<16 x float> %x0, <1
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermil_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> <i32 2, i32 1, i32 1, i32 0, i32 6, i32 5, i32 5, i32 4, i32 10, i32 9, i32 9, i32 8, i32 14, i32 13, i32 13, i32 12>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> <i32 2, i32 1, i32 1, i32 0, i32 6, i32 5, i32 5, i32 4, i32 10, i32 9, i32 9, i32 8, i32 14, i32 13, i32 13, i32 12>
@@ -1687,7 +1687,7 @@ define <16 x float>@test_int_x86_avx512_maskz_vpermil_ps_512(<16 x float> %x0, i
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermil_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> <i32 2, i32 1, i32 1, i32 0, i32 6, i32 5, i32 5, i32 4, i32 10, i32 9, i32 9, i32 8, i32 14, i32 13, i32 13, i32 12>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> <i32 2, i32 1, i32 1, i32 0, i32 6, i32 5, i32 5, i32 4, i32 10, i32 9, i32 9, i32 8, i32 14, i32 13, i32 13, i32 12>
@@ -1726,8 +1726,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pshuf_d_512(<16 x i32> %x0, i32 %x1,
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pshuf_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4, i32 11, i32 8, i32 8, i32 8, i32 15, i32 12, i32 12, i32 12>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X0]], <16 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4, i32 11, i32 8, i32 8, i32 8, i32 15, i32 12, i32 12, i32 12>
@@ -1750,7 +1750,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pshuf_d_512(<16 x i32> %x0, i32 %x1,
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pshuf_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4, i32 11, i32 8, i32 8, i32 8, i32 15, i32 12, i32 12, i32 12>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X0]], <16 x i32> <i32 3, i32 0, i32 0, i32 0, i32 7, i32 4, i32 4, i32 4, i32 11, i32 8, i32 8, i32 8, i32 15, i32 12, i32 12, i32 12>
@@ -1772,7 +1772,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pshuf_d_512(<16 x i32> %x0, i32 %x1,
define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_pcmpeq_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
@@ -1795,8 +1795,8 @@ define i16 @test_mask_pcmpeq_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_pcmpeq_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
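The shadow offsets in these mask tests follow MSan's parameter layout: argument shadows are packed into __msan_param_tls in declaration order, each slot 8-byte aligned, so for a prototype like (<16 x i32> %a, <16 x i32> %b, i16 %mask) the shadows sit at byte offsets 0, 64, and 128. A minimal sketch, assuming MSan's usual 800-byte parameter block (the function name is illustrative):

@__msan_param_tls = external thread_local(initialexec) global [100 x i64]

define i16 @mask_shadow() {
  ; %a's shadow fills bytes [0,64), %b's fills [64,128), so the i16 mask
  ; shadow is loaded from offset 128, matching the CHECK lines above.
  %s = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
  ret i16 %s
}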
@@ -1828,7 +1828,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32>, <16 x i32>, i16)
define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LABEL: @test_pcmpeq_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
@@ -1851,8 +1851,8 @@ define i8 @test_mask_pcmpeq_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_pcmpeq_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
@@ -1884,7 +1884,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64>, <8 x i64>, i8)
define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_pcmpgt_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A:%.*]], splat (i32 -2147483648)
; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[TMP1]], splat (i32 -1)
@@ -1911,8 +1911,8 @@ define i16 @test_mask_pcmpgt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_pcmpgt_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[A:%.*]], splat (i32 -2147483648)
; CHECK-NEXT: [[TMP5:%.*]] = xor <16 x i32> [[TMP1]], splat (i32 -1)
@@ -1948,7 +1948,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32>, <16 x i32>, i16)
define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LABEL: @test_pcmpgt_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A:%.*]], splat (i64 -9223372036854775808)
; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 -1)
@@ -1975,8 +1975,8 @@ define i8 @test_mask_pcmpgt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_pcmpgt_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[A:%.*]], splat (i64 -9223372036854775808)
; CHECK-NEXT: [[TMP5:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 -1)
@@ -2014,7 +2014,7 @@ declare <8 x double> @llvm.x86.avx512.mask.unpckh.pd.512(<8 x double>, <8 x doub
define <8 x double>@test_int_x86_avx512_unpckh_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_unpckh_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -2029,9 +2029,9 @@ define <8 x double>@test_int_x86_avx512_mask_unpckh_pd_512(<8 x double> %x0, <8
;
; CHECK-LABEL: @test_int_x86_avx512_mask_unpckh_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -2057,7 +2057,7 @@ declare <16 x float> @llvm.x86.avx512.mask.unpckh.ps.512(<16 x float>, <16 x flo
define <16 x float>@test_int_x86_avx512_unpckh_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_unpckh_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -2072,9 +2072,9 @@ define <16 x float>@test_int_x86_avx512_mask_unpckh_ps_512(<16 x float> %x0, <16
;
; CHECK-LABEL: @test_int_x86_avx512_mask_unpckh_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -2100,7 +2100,7 @@ declare <8 x double> @llvm.x86.avx512.mask.unpckl.pd.512(<8 x double>, <8 x doub
define <8 x double>@test_int_x86_avx512_unpckl_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_unpckl_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -2115,9 +2115,9 @@ define <8 x double>@test_int_x86_avx512_mask_unpckl_pd_512(<8 x double> %x0, <8
;
; CHECK-LABEL: @test_int_x86_avx512_mask_unpckl_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -2143,7 +2143,7 @@ declare <16 x float> @llvm.x86.avx512.mask.unpckl.ps.512(<16 x float>, <16 x flo
define <16 x float>@test_int_x86_avx512_unpckl_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_unpckl_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -2158,9 +2158,9 @@ define <16 x float>@test_int_x86_avx512_mask_unpckl_ps_512(<16 x float> %x0, <16
;
; CHECK-LABEL: @test_int_x86_avx512_mask_unpckl_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -2186,7 +2186,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_punpcklqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_punpcklqd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -2201,9 +2201,9 @@ define <8 x i64>@test_int_x86_avx512_mask_punpcklqd_q_512(<8 x i64> %x0, <8 x i6
;
; CHECK-LABEL: @test_int_x86_avx512_mask_punpcklqd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -2226,8 +2226,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_punpcklqd_q_512(<8 x i64> %x0, <8 x i
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_punpcklqd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -2251,7 +2251,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.punpckhqd.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_punpckhqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_punpckhqd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -2266,9 +2266,9 @@ define <8 x i64>@test_int_x86_avx512_mask_punpckhqd_q_512(<8 x i64> %x0, <8 x i6
;
; CHECK-LABEL: @test_int_x86_avx512_mask_punpckhqd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -2292,7 +2292,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.punpckhd.q.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_punpckhd_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_punpckhd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -2307,9 +2307,9 @@ define <16 x i32>@test_int_x86_avx512_mask_punpckhd_q_512(<16 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_punpckhd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
@@ -2333,7 +2333,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.punpckld.q.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_punpckld_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_punpckld_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -2348,9 +2348,9 @@ define <16 x i32>@test_int_x86_avx512_mask_punpckld_q_512(<16 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_punpckld_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
@@ -2387,8 +2387,8 @@ define <16 x i32> @test_x86_avx512_mask_pslli_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_mask_pslli_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -2412,7 +2412,7 @@ define <16 x i32> @test_x86_avx512_maskz_pslli_d(<16 x i32> %a0, i16 %mask) #0
;
; CHECK-LABEL: @test_x86_avx512_maskz_pslli_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer
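For the shift-by-immediate tests, the CHECK lines show shadow being propagated by re-applying the same shift intrinsic to the operand's shadow and OR-ing in the shadow of the immediate, which is always clean and so folds to zeroinitializer. A minimal sketch of that pattern (%shadow_a0 and the function name are illustrative; the intrinsic and its signature appear verbatim in the hunks above):

declare <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32>, i32)

define <16 x i32> @shift_shadow(<16 x i32> %shadow_a0) {
  ; shift the shadow by the same immediate as the original operation
  %1 = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %shadow_a0, i32 7)
  ; OR in the (clean) shadow of the constant shift amount
  %2 = or <16 x i32> %1, zeroinitializer
  ret <16 x i32> %2
}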
@@ -2452,8 +2452,8 @@ define <8 x i64> @test_x86_avx512_mask_pslli_q(<8 x i64> %a0, <8 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_mask_pslli_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -2477,7 +2477,7 @@ define <8 x i64> @test_x86_avx512_maskz_pslli_q(<8 x i64> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: @test_x86_avx512_maskz_pslli_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer
@@ -2517,8 +2517,8 @@ define <16 x i32> @test_x86_avx512_mask_psrli_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_mask_psrli_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -2542,7 +2542,7 @@ define <16 x i32> @test_x86_avx512_maskz_psrli_d(<16 x i32> %a0, i16 %mask) #0
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrli_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer
@@ -2582,8 +2582,8 @@ define <8 x i64> @test_x86_avx512_mask_psrli_q(<8 x i64> %a0, <8 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_mask_psrli_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -2607,7 +2607,7 @@ define <8 x i64> @test_x86_avx512_maskz_psrli_q(<8 x i64> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrli_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer
@@ -2647,8 +2647,8 @@ define <16 x i32> @test_x86_avx512_mask_psrai_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_mask_psrai_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -2672,7 +2672,7 @@ define <16 x i32> @test_x86_avx512_maskz_psrai_d(<16 x i32> %a0, i16 %mask) #0
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrai_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer
@@ -2712,8 +2712,8 @@ define <8 x i64> @test_x86_avx512_mask_psrai_q(<8 x i64> %a0, <8 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_mask_psrai_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -2737,7 +2737,7 @@ define <8 x i64> @test_x86_avx512_maskz_psrai_q(<8 x i64> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrai_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer
@@ -2764,7 +2764,7 @@ declare void @llvm.x86.avx512.storent.q.512(ptr, <8 x i64>)
define void@test_storent_q_512(<8 x i64> %data, ptr %ptr) #0 {
;
; CHECK-LABEL: @test_storent_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2789,7 +2789,7 @@ declare void @llvm.x86.avx512.storent.pd.512(ptr, <8 x double>)
define void @test_storent_pd_512(<8 x double> %data, ptr %ptr) #0 {
;
; CHECK-LABEL: @test_storent_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2814,7 +2814,7 @@ declare void @llvm.x86.avx512.storent.ps.512(ptr, <16 x float>)
define void @test_storent_ps_512(<16 x float> %data, ptr %ptr) #0 {
;
; CHECK-LABEL: @test_storent_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2837,7 +2837,7 @@ define void @test_storent_ps_512(<16 x float> %data, ptr %ptr) #0 {
define <16 x i32> @test_xor_epi32(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_xor_epi32(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -2852,9 +2852,9 @@ define <16 x i32> @test_mask_xor_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %
;
; CHECK-LABEL: @test_mask_xor_epi32(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = xor <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -2878,7 +2878,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32> @test_or_epi32(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_or_epi32(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A:%.*]], splat (i32 -1)
; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[B:%.*]], splat (i32 -1)
@@ -2899,9 +2899,9 @@ define <16 x i32> @test_mask_or_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %p
;
; CHECK-LABEL: @test_mask_or_epi32(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = xor <16 x i32> [[A:%.*]], splat (i32 -1)
; CHECK-NEXT: [[TMP6:%.*]] = xor <16 x i32> [[B:%.*]], splat (i32 -1)
@@ -2931,7 +2931,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32>, <16 x i32>, <16 x
define <16 x i32> @test_and_epi32(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_and_epi32(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = and <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = and <16 x i32> [[A:%.*]], [[TMP2]]
@@ -2950,9 +2950,9 @@ define <16 x i32> @test_mask_and_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %
;
; CHECK-LABEL: @test_mask_and_epi32(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP6:%.*]] = and <16 x i32> [[A:%.*]], [[TMP2]]
@@ -2980,7 +2980,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32>, <16 x i32>, <16
define <8 x i64> @test_xor_epi64(<8 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LABEL: @test_xor_epi64(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A:%.*]], [[B:%.*]]
@@ -2995,9 +2995,9 @@ define <8 x i64> @test_mask_xor_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %pass
;
; CHECK-LABEL: @test_mask_xor_epi64(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = xor <8 x i64> [[A:%.*]], [[B:%.*]]
@@ -3021,7 +3021,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64>, <8 x i64>, <8 x i6
define <8 x i64> @test_or_epi64(<8 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LABEL: @test_or_epi64(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A:%.*]], splat (i64 -1)
; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[B:%.*]], splat (i64 -1)
@@ -3042,9 +3042,9 @@ define <8 x i64> @test_mask_or_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passT
;
; CHECK-LABEL: @test_mask_or_epi64(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = xor <8 x i64> [[A:%.*]], splat (i64 -1)
; CHECK-NEXT: [[TMP6:%.*]] = xor <8 x i64> [[B:%.*]], splat (i64 -1)
@@ -3074,7 +3074,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64>, <8 x i64>, <8 x i64
define <8 x i64> @test_and_epi64(<8 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LABEL: @test_and_epi64(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = and <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = and <8 x i64> [[A:%.*]], [[TMP2]]
@@ -3093,9 +3093,9 @@ define <8 x i64> @test_mask_and_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %pass
;
; CHECK-LABEL: @test_mask_and_epi64(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP6:%.*]] = and <8 x i64> [[A:%.*]], [[TMP2]]
@@ -3123,7 +3123,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64>, <8 x i64>, <8 x i6
define <16 x i32> @test_mask_add_epi32_rr(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mask_add_epi32_rr(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = add <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -3138,9 +3138,9 @@ define <16 x i32> @test_mask_add_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i
;
; CHECK-LABEL: @test_mask_add_epi32_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -3163,8 +3163,8 @@ define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m
;
; CHECK-LABEL: @test_mask_add_epi32_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = add <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -3186,7 +3186,7 @@ define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m
define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mask_add_epi32_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3213,10 +3213,10 @@ define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_add_epi32_rmk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -3250,9 +3250,9 @@ define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <16 x i32>
define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_add_epi32_rmkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -3286,9 +3286,9 @@ define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i16 %mask
define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_add_epi32_rmb(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
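The odd-looking offsets 72 and 136 in the rm/rmb variants fall out of the same packing rule: the ptr %ptr_b argument contributes only an 8-byte i64 shadow at offset 64, so the <16 x i32> shadows that follow start at 72 and 136 rather than 128 and 192. A minimal sketch (function name illustrative, declaration assumed as above):

@__msan_param_tls = external thread_local(initialexec) global [100 x i64]

define <16 x i32> @third_arg_shadow() {
  ; bytes [64,72) hold the pointer's i64 shadow, so the next vector
  ; shadow slot begins at byte 72, as in the CHECK lines above.
  %s = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
  ret <16 x i32> %s
}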
@@ -3321,12 +3321,12 @@ define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <16 x i32>
define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_add_epi32_rmbk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]]
@@ -3366,11 +3366,11 @@ define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <16 x i32
define <16 x i32> @test_mask_add_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_add_epi32_rmbkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF1]]
@@ -3412,7 +3412,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32> @test_mask_sub_epi32_rr(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mask_sub_epi32_rr(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = sub <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -3427,9 +3427,9 @@ define <16 x i32> @test_mask_sub_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i
;
; CHECK-LABEL: @test_mask_sub_epi32_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = sub <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -3452,8 +3452,8 @@ define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m
;
; CHECK-LABEL: @test_mask_sub_epi32_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = sub <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -3475,7 +3475,7 @@ define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m
define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi32_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3502,10 +3502,10 @@ define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi32_rmk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -3539,9 +3539,9 @@ define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <16 x i32>
define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi32_rmkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -3575,9 +3575,9 @@ define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i16 %mask
define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi32_rmb(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3610,12 +3610,12 @@ define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <16 x i32>
define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi32_rmbk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]]
@@ -3655,10 +3655,10 @@ define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <16 x i32
define <16 x i32> @test_mask_sub_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i16 %mask, <16 x i32> %extra_param) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi32_rmbkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP17:%.*]], !prof [[PROF1]]
@@ -3700,7 +3700,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32>, <16 x i32>, <16
define <8 x i64> @test_mask_add_epi64_rr(<8 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LABEL: @test_mask_add_epi64_rr(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = add <8 x i64> [[A:%.*]], [[B:%.*]]
@@ -3715,9 +3715,9 @@ define <8 x i64> @test_mask_add_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64>
;
; CHECK-LABEL: @test_mask_add_epi64_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = add <8 x i64> [[A:%.*]], [[B:%.*]]
@@ -3740,8 +3740,8 @@ define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask)
;
; CHECK-LABEL: @test_mask_add_epi64_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = add <8 x i64> [[A:%.*]], [[B:%.*]]
@@ -3763,7 +3763,7 @@ define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask)
define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mask_add_epi64_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3790,10 +3790,10 @@ define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, ptr %ptr_b) #0 {
define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_add_epi64_rmk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -3827,9 +3827,9 @@ define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %p
define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, ptr %ptr_b, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_add_epi64_rmkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -3863,9 +3863,9 @@ define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, ptr %ptr_b, i8 %mask)
define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, ptr %ptr_b, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_add_epi64_rmb(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3898,12 +3898,12 @@ define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, ptr %ptr_b, <8 x i64> %e
define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_add_epi64_rmbk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]]
@@ -3943,11 +3943,11 @@ define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %
define <8 x i64> @test_mask_add_epi64_rmbkz(<8 x i64> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_add_epi64_rmbkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF1]]
@@ -3989,7 +3989,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64>, <8 x i64>, <8 x i6
define <8 x i64> @test_mask_sub_epi64_rr(<8 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LABEL: @test_mask_sub_epi64_rr(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = sub <8 x i64> [[A:%.*]], [[B:%.*]]
@@ -4004,9 +4004,9 @@ define <8 x i64> @test_mask_sub_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64>
;
; CHECK-LABEL: @test_mask_sub_epi64_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = sub <8 x i64> [[A:%.*]], [[B:%.*]]
@@ -4029,8 +4029,8 @@ define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask)
;
; CHECK-LABEL: @test_mask_sub_epi64_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = sub <8 x i64> [[A:%.*]], [[B:%.*]]
@@ -4052,7 +4052,7 @@ define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask)
define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi64_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -4079,10 +4079,10 @@ define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, ptr %ptr_b) #0 {
define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi64_rmk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -4116,9 +4116,9 @@ define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %p
define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, ptr %ptr_b, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi64_rmkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -4152,9 +4152,9 @@ define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, ptr %ptr_b, i8 %mask)
define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, ptr %ptr_b, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi64_rmb(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -4187,12 +4187,12 @@ define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, ptr %ptr_b, <8 x i64> %e
define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi64_rmbk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]]
@@ -4232,11 +4232,11 @@ define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %
define <8 x i64> @test_mask_sub_epi64_rmbkz(<8 x i64> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_sub_epi64_rmbkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF1]]
@@ -4278,7 +4278,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64>, <8 x i64>, <8 x i6
define <16 x i32> @test_mask_mullo_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mask_mullo_epi32_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = mul <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -4293,9 +4293,9 @@ define <16 x i32> @test_mask_mullo_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <
;
; CHECK-LABEL: @test_mask_mullo_epi32_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = mul <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -4318,8 +4318,8 @@ define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
;
; CHECK-LABEL: @test_mask_mullo_epi32_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[A:%.*]], [[B:%.*]]
@@ -4341,7 +4341,7 @@ define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <16 x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mask_mullo_epi32_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -4368,10 +4368,10 @@ define <16 x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 {
define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_mullo_epi32_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -4405,9 +4405,9 @@ define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <16
define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_mullo_epi32_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -4441,9 +4441,9 @@ define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i16
define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mullo_epi32_rmb_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -4476,12 +4476,12 @@ define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b, <16
define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mullo_epi32_rmbk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]]
@@ -4521,11 +4521,11 @@ define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <16
define <16 x i32> @test_mask_mullo_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mullo_epi32_rmbkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF1]]
@@ -4570,7 +4570,7 @@ declare <16 x float> @llvm.x86.avx512.mask.shuf.f32x4(<16 x float>, <16 x float>
define <16 x float>@test_int_x86_avx512_shuf_f32x4(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_shuf_f32x4(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
@@ -4585,9 +4585,9 @@ define <16 x float>@test_int_x86_avx512_mask_shuf_f32x4(<16 x float> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_f32x4(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
@@ -4613,7 +4613,7 @@ declare <8 x double> @llvm.x86.avx512.mask.shuf.f64x2(<8 x double>, <8 x double>
define <8 x double>@test_int_x86_avx512_shuf_f64x2(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_shuf_f64x2(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
@@ -4628,9 +4628,9 @@ define <8 x double>@test_int_x86_avx512_mask_shuf_f64x2(<8 x double> %x0, <8 x d
;
; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_f64x2(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
@@ -4655,8 +4655,8 @@ define <8 x double>@test_int_x86_avx512_maskz_shuf_f64x2(<8 x double> %x0, <8 x
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_shuf_f64x2(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
@@ -4681,7 +4681,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.shuf.i32x4(<16 x i32>, <16 x i32>, i32,
define <16 x i32>@test_int_x86_avx512_shuf_i32x4(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_shuf_i32x4(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
@@ -4696,9 +4696,9 @@ define <16 x i32>@test_int_x86_avx512_mask_shuf_i32x4(<16 x i32> %x0, <16 x i32>
;
; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_i32x4(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
@@ -4722,7 +4722,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.shuf.i64x2(<8 x i64>, <8 x i64>, i32, <8
define <8 x i64>@test_int_x86_avx512_shuf_i64x2(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_shuf_i64x2(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
@@ -4737,9 +4737,9 @@ define <8 x i64>@test_int_x86_avx512_mask_shuf_i64x2(<8 x i64> %x0, <8 x i64> %x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_i64x2(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> <i32 4, i32 5, i32 2, i32 3, i32 10, i32 11, i32 8, i32 9>
@@ -4763,7 +4763,7 @@ declare <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double>, <8 x double
define <8 x double>@test_int_x86_avx512_shuf_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_shuf_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 9, i32 3, i32 10, i32 5, i32 12, i32 6, i32 14>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 0, i32 9, i32 3, i32 10, i32 5, i32 12, i32 6, i32 14>
@@ -4778,9 +4778,9 @@ define <8 x double>@test_int_x86_avx512_mask_shuf_pd_512(<8 x double> %x0, <8 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 9, i32 3, i32 10, i32 5, i32 12, i32 6, i32 14>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 0, i32 9, i32 3, i32 10, i32 5, i32 12, i32 6, i32 14>
@@ -4805,8 +4805,8 @@ define <8 x double>@test_int_x86_avx512_maskz_shuf_pd_512(<8 x double> %x0, <8 x
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_shuf_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 9, i32 3, i32 10, i32 5, i32 12, i32 6, i32 14>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> <i32 0, i32 9, i32 3, i32 10, i32 5, i32 12, i32 6, i32 14>
@@ -4831,7 +4831,7 @@ declare <16 x float> @llvm.x86.avx512.mask.shuf.ps.512(<16 x float>, <16 x float
define <16 x float>@test_int_x86_avx512_shuf_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_shuf_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 2, i32 1, i32 17, i32 16, i32 6, i32 5, i32 21, i32 20, i32 10, i32 9, i32 25, i32 24, i32 14, i32 13, i32 29, i32 28>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> <i32 2, i32 1, i32 17, i32 16, i32 6, i32 5, i32 21, i32 20, i32 10, i32 9, i32 25, i32 24, i32 14, i32 13, i32 29, i32 28>
@@ -4846,9 +4846,9 @@ define <16 x float>@test_int_x86_avx512_mask_shuf_ps_512(<16 x float> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 2, i32 1, i32 17, i32 16, i32 6, i32 5, i32 21, i32 20, i32 10, i32 9, i32 25, i32 24, i32 14, i32 13, i32 29, i32 28>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> <i32 2, i32 1, i32 17, i32 16, i32 6, i32 5, i32 21, i32 20, i32 10, i32 9, i32 25, i32 24, i32 14, i32 13, i32 29, i32 28>
@@ -4874,7 +4874,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_pmaxs_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaxs_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.smax.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -4889,9 +4889,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pmaxs_d_512(<16 x i32> %x0, <16 x i32
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.smax.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -4915,7 +4915,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_pmaxs_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaxs_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.smax.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -4930,9 +4930,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pmaxs_q_512(<8 x i64> %x0, <8 x i64> %
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.smax.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -4956,7 +4956,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_pmaxu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaxu_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.umax.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -4971,9 +4971,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pmaxu_d_512(<16 x i32> %x0, <16 x i32
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.umax.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -4997,7 +4997,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_pmaxu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaxu_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.umax.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -5012,9 +5012,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pmaxu_q_512(<8 x i64> %x0, <8 x i64> %
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.umax.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -5038,7 +5038,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_pmins_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmins_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.smin.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -5053,9 +5053,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pmins_d_512(<16 x i32> %x0, <16 x i32
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.smin.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -5079,7 +5079,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_pmins_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmins_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.smin.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -5094,9 +5094,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pmins_q_512(<8 x i64> %x0, <8 x i64> %
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.smin.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -5120,7 +5120,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_pminu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pminu_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -5135,9 +5135,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pminu_d_512(<16 x i32> %x0, <16 x i32
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -5161,7 +5161,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_pminu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pminu_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.umin.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -5176,9 +5176,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pminu_q_512(<8 x i64> %x0, <8 x i64> %
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.umin.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -5201,10 +5201,10 @@ define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x
;
; CHECK-LABEL: @test_mm_mask_move_ss(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP0]], 0
; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[__U:%.*]], 0
@@ -5248,8 +5248,8 @@ define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4
; CHECK-LABEL: @test_mm_maskz_move_ss(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP0]], 0
; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[__U:%.*]], 0
@@ -5288,10 +5288,10 @@ define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2
;
; CHECK-LABEL: @test_mm_mask_move_sd(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP0]], 0
; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[__U:%.*]], 0
@@ -5334,8 +5334,8 @@ define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <
; CHECK-LABEL: @test_mm_maskz_move_sd(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP0]], 0
; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[__U:%.*]], 0
@@ -5394,8 +5394,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pmovzxb_d_512(<16 x i8> %x0, <16 x i3
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxb_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -5420,7 +5420,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pmovzxb_d_512(<16 x i8> %x0, i16 %x2
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxb_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -5462,8 +5462,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovzxb_q_512(<16 x i8> %x0, <8 x i64>
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxb_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5488,7 +5488,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovzxb_q_512(<16 x i8> %x0, i8 %x2)
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxb_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5530,8 +5530,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovzxd_q_512(<8 x i32> %x0, <8 x i64>
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> splat (i32 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[X0:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5556,7 +5556,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovzxd_q_512(<8 x i32> %x0, i8 %x2)
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> splat (i32 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[X0:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5598,8 +5598,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pmovzxw_d_512(<16 x i16> %x0, <16 x i
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> splat (i16 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[X0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -5624,7 +5624,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pmovzxw_d_512(<16 x i16> %x0, i16 %x
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> splat (i16 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[X0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -5666,8 +5666,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovzxw_q_512(<8 x i16> %x0, <8 x i64>
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxw_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[X0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5692,7 +5692,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovzxw_q_512(<8 x i16> %x0, i8 %x2)
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxw_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[X0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5734,8 +5734,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pmovsxb_d_512(<16 x i8> %x0, <16 x i3
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxb_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -5760,7 +5760,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pmovsxb_d_512(<16 x i8> %x0, i16 %x2
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxb_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -5802,8 +5802,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovsxb_q_512(<16 x i8> %x0, <8 x i64>
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxb_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5828,7 +5828,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovsxb_q_512(<16 x i8> %x0, i8 %x2)
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxb_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5870,8 +5870,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovsxd_q_512(<8 x i32> %x0, <8 x i64>
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> splat (i32 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[X0:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5896,7 +5896,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovsxd_q_512(<8 x i32> %x0, i8 %x2)
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxd_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> splat (i32 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[X0:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -5938,8 +5938,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pmovsxw_d_512(<16 x i16> %x0, <16 x i
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> splat (i16 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[X0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -5964,7 +5964,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pmovsxw_d_512(<16 x i16> %x0, i16 %x
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> splat (i16 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[X0:%.*]], <16 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -6006,8 +6006,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovsxw_q_512(<8 x i16> %x0, <8 x i64>
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxw_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[X0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -6032,7 +6032,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovsxw_q_512(<8 x i16> %x0, i8 %x2)
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxw_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[X0:%.*]], <8 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -6058,7 +6058,7 @@ declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>)
define <16 x i32>@test_int_x86_avx512_prolv_d_512(<16 x i32> %x0, <16 x i32> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_prolv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -6076,9 +6076,9 @@ define <16 x i32>@test_int_x86_avx512_mask_prolv_d_512(<16 x i32> %x0, <16 x i32
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prolv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -6106,8 +6106,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_prolv_d_512(<16 x i32> %x0, <16 x i3
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_prolv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -6136,7 +6136,7 @@ declare <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64>, <8 x i64>)
define <8 x i64>@test_int_x86_avx512_prolv_q_512(<8 x i64> %x0, <8 x i64> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_prolv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -6154,9 +6154,9 @@ define <8 x i64>@test_int_x86_avx512_mask_prolv_q_512(<8 x i64> %x0, <8 x i64> %
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prolv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -6184,8 +6184,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_prolv_q_512(<8 x i64> %x0, <8 x i64>
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_prolv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -6214,7 +6214,7 @@ declare <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32>, <16 x i32>)
define <16 x i32>@test_int_x86_avx512_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_prorv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -6232,9 +6232,9 @@ define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prorv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -6262,8 +6262,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_prorv_d_512(<16 x i32> %x0, <16 x i3
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_prorv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -6292,7 +6292,7 @@ declare <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64>, <8 x i64>)
define <8 x i64>@test_int_x86_avx512_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_prorv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -6310,9 +6310,9 @@ define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> %
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prorv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -6340,8 +6340,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_prorv_q_512(<8 x i64> %x0, <8 x i64>
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_prorv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -6371,8 +6371,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_prol_d_512(<1
;
; CHECK-LABEL: @test_int_x86_avx512_prol_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> splat (i32 3))
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -6427,8 +6427,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_prol_q_512(<8 x
;
; CHECK-LABEL: @test_int_x86_avx512_prol_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> splat (i64 3))
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -6483,8 +6483,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_pror_d_512(<1
;
; CHECK-LABEL: @test_int_x86_avx512_pror_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> splat (i32 3))
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -6539,8 +6539,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_pror_q_512(<8 x
;
; CHECK-LABEL: @test_int_x86_avx512_pror_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> splat (i64 3))
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -6595,8 +6595,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_psrl_qi_512
;
; CHECK-LABEL: @test_int_x86_avx512_mask_psrl_qi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 4)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -6647,8 +6647,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_psrl_di_
;
; CHECK-LABEL: @test_int_x86_avx512_mask_psrl_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 4)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -6699,8 +6699,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_psra_di_
;
; CHECK-LABEL: @test_int_x86_avx512_mask_psra_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -6751,8 +6751,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_psra_qi_512
;
; CHECK-LABEL: @test_int_x86_avx512_mask_psra_qi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -6803,8 +6803,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_psll_di_
;
; CHECK-LABEL: @test_int_x86_avx512_mask_psll_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -6855,8 +6855,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_psll_qi_512
;
; CHECK-LABEL: @test_int_x86_avx512_mask_psll_qi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -6904,7 +6904,7 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_psll_qi_512
define <16 x i32> @test_x86_avx512_psll_d(<16 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -6925,9 +6925,9 @@ define <16 x i32> @test_x86_avx512_mask_psll_d(<16 x i32> %a0, <4 x i32> %a1, <1
;
; CHECK-LABEL: @test_x86_avx512_mask_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -6956,8 +6956,8 @@ define <16 x i32> @test_x86_avx512_maskz_psll_d(<16 x i32> %a0, <4 x i32> %a1, i
;
; CHECK-LABEL: @test_x86_avx512_maskz_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -6987,7 +6987,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32>, <4 x i32>, <16 x i32
define <8 x i64> @test_x86_avx512_psll_q(<8 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -7008,9 +7008,9 @@ define <8 x i64> @test_x86_avx512_mask_psll_q(<8 x i64> %a0, <2 x i64> %a1, <8 x
;
; CHECK-LABEL: @test_x86_avx512_mask_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -7039,8 +7039,8 @@ define <8 x i64> @test_x86_avx512_maskz_psll_q(<8 x i64> %a0, <2 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_maskz_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -7070,7 +7070,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64>, <2 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psrl_d(<16 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -7091,9 +7091,9 @@ define <16 x i32> @test_x86_avx512_mask_psrl_d(<16 x i32> %a0, <4 x i32> %a1, <1
;
; CHECK-LABEL: @test_x86_avx512_mask_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -7122,8 +7122,8 @@ define <16 x i32> @test_x86_avx512_maskz_psrl_d(<16 x i32> %a0, <4 x i32> %a1, i
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -7153,7 +7153,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32>, <4 x i32>, <16 x i32
define <8 x i64> @test_x86_avx512_psrl_q(<8 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -7174,9 +7174,9 @@ define <8 x i64> @test_x86_avx512_mask_psrl_q(<8 x i64> %a0, <2 x i64> %a1, <8 x
;
; CHECK-LABEL: @test_x86_avx512_mask_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -7205,8 +7205,8 @@ define <8 x i64> @test_x86_avx512_maskz_psrl_q(<8 x i64> %a0, <2 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -7236,7 +7236,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64>, <2 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psra_d(<16 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -7257,9 +7257,9 @@ define <16 x i32> @test_x86_avx512_mask_psra_d(<16 x i32> %a0, <4 x i32> %a1, <1
;
; CHECK-LABEL: @test_x86_avx512_mask_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -7288,8 +7288,8 @@ define <16 x i32> @test_x86_avx512_maskz_psra_d(<16 x i32> %a0, <4 x i32> %a1, i
;
; CHECK-LABEL: @test_x86_avx512_maskz_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -7319,7 +7319,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32>, <4 x i32>, <16 x i32
define <8 x i64> @test_x86_avx512_psra_q(<8 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psra_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -7340,9 +7340,9 @@ define <8 x i64> @test_x86_avx512_mask_psra_q(<8 x i64> %a0, <2 x i64> %a1, <8 x
;
; CHECK-LABEL: @test_x86_avx512_mask_psra_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -7371,8 +7371,8 @@ define <8 x i64> @test_x86_avx512_maskz_psra_q(<8 x i64> %a0, <2 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_maskz_psra_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -7402,7 +7402,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64>, <2 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psllv_d(<16 x i32> %a0, <16 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psllv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -7420,9 +7420,9 @@ define <16 x i32> @test_x86_avx512_mask_psllv_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_mask_psllv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -7448,8 +7448,8 @@ define <16 x i32> @test_x86_avx512_maskz_psllv_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_maskz_psllv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -7476,7 +7476,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32>, <16 x i32>, <16 x i
define <8 x i64> @test_x86_avx512_psllv_q(<8 x i64> %a0, <8 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psllv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -7494,9 +7494,9 @@ define <8 x i64> @test_x86_avx512_mask_psllv_q(<8 x i64> %a0, <8 x i64> %a1, <8
;
; CHECK-LABEL: @test_x86_avx512_mask_psllv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -7522,8 +7522,8 @@ define <8 x i64> @test_x86_avx512_maskz_psllv_q(<8 x i64> %a0, <8 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_maskz_psllv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -7551,7 +7551,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64>, <8 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psrav_d(<16 x i32> %a0, <16 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrav_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -7569,9 +7569,9 @@ define <16 x i32> @test_x86_avx512_mask_psrav_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_mask_psrav_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -7597,8 +7597,8 @@ define <16 x i32> @test_x86_avx512_maskz_psrav_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrav_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -7625,7 +7625,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32>, <16 x i32>, <16 x i
define <8 x i64> @test_x86_avx512_psrav_q(<8 x i64> %a0, <8 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrav_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -7643,9 +7643,9 @@ define <8 x i64> @test_x86_avx512_mask_psrav_q(<8 x i64> %a0, <8 x i64> %a1, <8
;
; CHECK-LABEL: @test_x86_avx512_mask_psrav_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -7671,8 +7671,8 @@ define <8 x i64> @test_x86_avx512_maskz_psrav_q(<8 x i64> %a0, <8 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrav_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -7699,7 +7699,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64>, <8 x i64>, <8 x i64>,
define <16 x i32> @test_x86_avx512_psrlv_d(<16 x i32> %a0, <16 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrlv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -7717,9 +7717,9 @@ define <16 x i32> @test_x86_avx512_mask_psrlv_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_mask_psrlv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -7745,8 +7745,8 @@ define <16 x i32> @test_x86_avx512_maskz_psrlv_d(<16 x i32> %a0, <16 x i32> %a1,
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrlv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -7773,7 +7773,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32>, <16 x i32>, <16 x i
define <8 x i64> @test_x86_avx512_psrlv_q(<8 x i64> %a0, <8 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrlv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -7791,9 +7791,9 @@ define <8 x i64> @test_x86_avx512_mask_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, <8
;
; CHECK-LABEL: @test_x86_avx512_mask_psrlv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -7819,8 +7819,8 @@ define <8 x i64> @test_x86_avx512_maskz_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, i8
;
; CHECK-LABEL: @test_x86_avx512_maskz_psrlv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -7847,7 +7847,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64>, <8 x i64>, <8 x i64>,
define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, ptr %ptr) #0 {
;
; CHECK-LABEL: @test_x86_avx512_psrlv_q_memop(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -7893,8 +7893,8 @@ define <8 x double>@test_int_x86_avx512_mask_cvt_dq2pd_512(<8 x i32> %x0, <8 x d
;
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_dq2pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[CVT:%.*]] = sitofp <8 x i32> [[X0:%.*]] to <8 x double>
@@ -7934,8 +7934,8 @@ define <8 x double>@test_int_x86_avx512_mask_cvt_udq2pd_512(<8 x i32> %x0, <8 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_udq2pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[CVT:%.*]] = uitofp <8 x i32> [[X0:%.*]] to <8 x double>
@@ -7998,8 +7998,8 @@ define <16 x float> @test_x86_vcvtph2ps_512_rrk(<16 x i16> %a0,<16 x float> %a1,
;
; CHECK-LABEL: @test_x86_vcvtph2ps_512_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -8025,7 +8025,7 @@ define <16 x float> @test_x86_vcvtph2ps_512_sae_rrkz(<16 x i16> %a0, i16 %mask)
;
; CHECK-LABEL: @test_x86_vcvtph2ps_512_sae_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -8048,7 +8048,7 @@ define <16 x float> @test_x86_vcvtph2ps_512_rrkz(<16 x i16> %a0, i16 %mask) #0
;
; CHECK-LABEL: @test_x86_vcvtph2ps_512_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -8071,7 +8071,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16>, <16 x float
define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LABEL: @test_valign_q(
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
@@ -8086,10 +8086,10 @@ define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) #0 {
define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_valign_q(
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
; CHECK-NEXT: [[PALIGNR:%.*]] = shufflevector <8 x i64> [[B:%.*]], <8 x i64> [[A:%.*]], <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
@@ -8113,9 +8113,9 @@ declare <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64>, <8 x i64>, i32,
define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_valign_d(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
; CHECK-NEXT: [[PALIGNR:%.*]] = shufflevector <16 x i32> [[B:%.*]], <16 x i32> [[A:%.*]], <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
@@ -8141,7 +8141,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vpermilvar.pd.512(<8 x double>, <8 x
define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -8166,9 +8166,9 @@ define <8 x double>@test_int_x86_avx512_mask_vpermilvar_pd_512(<8 x double> %x0,
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermilvar_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -8203,8 +8203,8 @@ define <8 x double>@test_int_x86_avx512_maskz_vpermilvar_pd_512(<8 x double> %x0
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermilvar_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -8239,7 +8239,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4>
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -8264,9 +8264,9 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512(<16 x float> %x0,
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermilvar_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4>
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -8302,8 +8302,8 @@ define <16 x float>@test_int_x86_avx512_maskz_vpermilvar_ps_512(<16 x float> %x0
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermilvar_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4>
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -8338,8 +8338,8 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
; CHECK-NEXT: [[TMP7:%.*]] = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> [[X0]], <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 3, i32 2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3>)
@@ -8390,7 +8390,7 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16
define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mask_mul_epi32_rr(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -8421,9 +8421,9 @@ define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64
;
; CHECK-LABEL: @test_mask_mul_epi32_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -8462,8 +8462,8 @@ define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas
;
; CHECK-LABEL: @test_mask_mul_epi32_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -8501,7 +8501,7 @@ define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas
define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mask_mul_epi32_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -8544,10 +8544,10 @@ define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_mul_epi32_rmk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -8597,9 +8597,9 @@ define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %
define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_mul_epi32_rmkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -8649,9 +8649,9 @@ define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask)
define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mul_epi32_rmb(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -8703,12 +8703,12 @@ define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %
define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mul_epi32_rmbk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP35:%.*]], label [[TMP36:%.*]], !prof [[PROF1]]
@@ -8767,11 +8767,11 @@ define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64>
define <8 x i64> @test_mask_mul_epi32_rmbk_buildvector(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param) #0 {
;
; CHECK-LABEL: @test_mask_mul_epi32_rmbk_buildvector(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP35:%.*]], !prof [[PROF1]]
@@ -8848,11 +8848,11 @@ define <8 x i64> @test_mask_mul_epi32_rmbk_buildvector(<16 x i32> %a, ptr %ptr_b
define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mul_epi32_rmbkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP34:%.*]], label [[TMP35:%.*]], !prof [[PROF1]]
@@ -8911,10 +8911,10 @@ define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask,
define <8 x i64> @test_mask_mul_epi32_rmbkz_buildvector(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param) #0 {
;
; CHECK-LABEL: @test_mask_mul_epi32_rmbkz_buildvector(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP34:%.*]], !prof [[PROF1]]
@@ -8993,7 +8993,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32>, <16 x i32>, <8 x
define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mask_mul_epu32_rr(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -9024,9 +9024,9 @@ define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64
;
; CHECK-LABEL: @test_mask_mul_epu32_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -9065,8 +9065,8 @@ define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas
;
; CHECK-LABEL: @test_mask_mul_epu32_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -9104,7 +9104,7 @@ define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas
define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mask_mul_epu32_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -9147,10 +9147,10 @@ define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_mul_epu32_rmk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -9200,9 +9200,9 @@ define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %
define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_mul_epu32_rmkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -9252,9 +9252,9 @@ define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask)
define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mul_epu32_rmb(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -9306,12 +9306,12 @@ define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %
define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mul_epu32_rmbk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP35:%.*]], label [[TMP36:%.*]], !prof [[PROF1]]
@@ -9370,11 +9370,11 @@ define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64>
define <8 x i64> @test_mask_mul_epu32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 {
;
; CHECK-LABEL: @test_mask_mul_epu32_rmbkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP34:%.*]], label [[TMP35:%.*]], !prof [[PROF1]]
@@ -9435,8 +9435,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32>, <16 x i32>, <8
define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_vextractf32x4(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <4 x i32> <i32 8, i32 9, i32 10, i32 11>
@@ -9465,8 +9465,8 @@ declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float>, i32, <
define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_vextracti64x4(
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -9494,7 +9494,7 @@ define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_vextracti32x4(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <4 x i32> <i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[A:%.*]], <16 x i32> [[A]], <4 x i32> <i32 8, i32 9, i32 10, i32 11>
@@ -9536,7 +9536,7 @@ declare <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float>, <4 x fl
define <16 x float>@test_int_x86_avx512_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_insertf32x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -9553,10 +9553,10 @@ define <16 x float>@test_int_x86_avx512_insertf32x4_512(<16 x float> %x0, <4 x f
define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_insertf32x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x float> [[X1:%.*]], <4 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -9582,9 +9582,9 @@ define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <
define <16 x float>@test_int_x86_avx512_maskz_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, i16 %x4) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_insertf32x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[X1:%.*]], <4 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -9610,7 +9610,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32>, <4 x i32>,
define <16 x i32>@test_int_x86_avx512_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_inserti32x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -9627,10 +9627,10 @@ define <16 x i32>@test_int_x86_avx512_inserti32x4_512(<16 x i32> %x0, <4 x i32>
define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_inserti32x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[X1:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -9654,9 +9654,9 @@ define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x
define <16 x i32>@test_int_x86_avx512_maskz_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, i16 %x4) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_inserti32x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[X1:%.*]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -9681,7 +9681,7 @@ declare <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double>, <4 x do
define <8 x double>@test_int_x86_avx512_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, <8 x double> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_insertf64x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
@@ -9698,10 +9698,10 @@ define <8 x double>@test_int_x86_avx512_insertf64x4_512(<8 x double> %x0, <4 x d
define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, <8 x double> %x3, i8 %x4) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_insertf64x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x double> [[X1:%.*]], <4 x double> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
@@ -9727,9 +9727,9 @@ define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, <
define <8 x double>@test_int_x86_avx512_maskz_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, i8 %x4) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_insertf64x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x double> [[X1:%.*]], <4 x double> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
@@ -9755,7 +9755,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64>, <4 x i64>, i3
define <8 x i64>@test_int_x86_avx512_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, <8 x i64> %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_inserti64x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
@@ -9772,10 +9772,10 @@ define <8 x i64>@test_int_x86_avx512_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x
define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, <8 x i64> %x3, i8 %x4) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_inserti64x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i64> [[X1:%.*]], <4 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
@@ -9799,9 +9799,9 @@ define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i6
define <8 x i64>@test_int_x86_avx512_maskz_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, i8 %x4) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_inserti64x4_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[X1:%.*]], <4 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
@@ -9850,8 +9850,8 @@ declare <8 x i64> @llvm.x86.avx512.movntdqa(ptr) nounwind readonly
define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, <8 x i16> %extra_param) #0 {
; CHECK-LABEL: @test_cmp_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP77:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP77:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
@@ -9971,9 +9971,9 @@ define <8 x i16> @test_mask_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask,
;
; CHECK-LABEL: @test_mask_cmp_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP146:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP146:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
@@ -10162,8 +10162,8 @@ declare i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32>, <16 x i32>, i32, i16) no
define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, <8 x i16> %extra_param) #0 {
; CHECK-LABEL: @test_ucmp_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP69:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP69:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
@@ -10275,9 +10275,9 @@ define <8 x i16> @test_mask_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask
;
; CHECK-LABEL: @test_mask_ucmp_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP138:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP138:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
@@ -10458,8 +10458,8 @@ declare i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32>, <16 x i32>, i32, i16) n
define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i8> %extra_param) #0 {
; CHECK-LABEL: @test_cmp_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP77:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP77:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
@@ -10579,9 +10579,9 @@ define <8 x i8> @test_mask_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask, <8
;
; CHECK-LABEL: @test_mask_cmp_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP146:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP146:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
@@ -10770,8 +10770,8 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwi
define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i8> %extra_param) #0 {
; CHECK-LABEL: @test_ucmp_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP69:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP69:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
@@ -10883,9 +10883,9 @@ define <8 x i8> @test_mask_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask, <8
;
; CHECK-LABEL: @test_mask_ucmp_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP138:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP138:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
@@ -11069,8 +11069,8 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0,
;
; CHECK-LABEL: @test_int_x86_avx512_mask_broadcastf32x4_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP1]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[X0:%.*]], <4 x float> [[X0]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -11116,8 +11116,8 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512_load(ptr %x0ptr,
;
; CHECK-LABEL: @test_int_x86_avx512_mask_broadcastf32x4_512_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -11169,8 +11169,8 @@ define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0
;
; CHECK-LABEL: @test_int_x86_avx512_mask_broadcastf64x4_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x double> [[X0:%.*]], <4 x double> [[X0]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -11195,7 +11195,7 @@ define <8 x double>@test_int_x86_avx512_maskz_broadcastf64x4_512(<4 x double> %x
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_broadcastf64x4_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x double> [[X0:%.*]], <4 x double> [[X0]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -11219,8 +11219,8 @@ define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512_load(ptr %x0ptr,
;
; CHECK-LABEL: @test_int_x86_avx512_mask_broadcastf64x4_512_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -11259,8 +11259,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_broadcas
;
; CHECK-LABEL: @test_int_x86_avx512_mask_broadcasti32x4_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP1]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[X0:%.*]], <4 x i32> [[X0]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -11306,8 +11306,8 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512_load(ptr %x0ptr, <
;
; CHECK-LABEL: @test_int_x86_avx512_mask_broadcasti32x4_512_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -11357,8 +11357,8 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_broadcasti64x4_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[X0:%.*]], <4 x i64> [[X0]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -11381,7 +11381,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_broadcasti64x4_512(<4 x i64> %x0, i8
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_broadcasti64x4_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[X0:%.*]], <4 x i64> [[X0]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -11404,8 +11404,8 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512_load(ptr %x0ptr, <8
;
; CHECK-LABEL: @test_int_x86_avx512_mask_broadcasti64x4_512_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -11457,8 +11457,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pabs_d_512(<16 x i32> %x0, <16 x i32>
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pabs_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <16 x i32> [[X0:%.*]], splat (i32 -2147483648)
; CHECK-NEXT: [[TMP13:%.*]] = select <16 x i1> [[TMP12]], <16 x i32> splat (i32 -1), <16 x i32> [[TMP1]]
@@ -11500,8 +11500,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pabs_q_512(<8 x i64> %x0, <8 x i64> %x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pabs_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <8 x i64> [[X0:%.*]], splat (i64 -9223372036854775808)
; CHECK-NEXT: [[TMP13:%.*]] = select <8 x i1> [[TMP12]], <8 x i64> splat (i64 -1), <8 x i64> [[TMP1]]
@@ -11526,8 +11526,8 @@ define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1, i8 %m) #0 {
;
; CHECK-LABEL: @test_vptestmq(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i64> [[A0:%.*]], [[TMP2]]
@@ -11585,8 +11585,8 @@ define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1, i16 %m) #0 {
;
; CHECK-LABEL: @test_vptestmd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[A0:%.*]], [[TMP2]]
@@ -11646,8 +11646,8 @@ define i16@test_int_x86_avx512_ptestnm_d_512(<16 x i32> %x0, <16 x i32> %x1, i16
;
; CHECK-LABEL: @test_int_x86_avx512_ptestnm_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[X0:%.*]], [[TMP2]]
@@ -11706,8 +11706,8 @@ define i8@test_int_x86_avx512_ptestnm_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2
;
; CHECK-LABEL: @test_int_x86_avx512_ptestnm_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i64> [[X0:%.*]], [[TMP2]]
@@ -11765,7 +11765,7 @@ define i16 @test_kand(i16 %a0, i16 %a1) #0 {
;
; CHECK-LABEL: @test_kand(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1>
@@ -11802,7 +11802,7 @@ define i16 @test_kandn(i16 %a0, i16 %a1) #0 {
;
; CHECK-LABEL: @test_kandn(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1>
@@ -11862,7 +11862,7 @@ define i16 @test_kor(i16 %a0, i16 %a1) #0 {
;
; CHECK-LABEL: @test_kor(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1>
@@ -11904,7 +11904,7 @@ define i16 @test_kxnor(i16 %a0, i16 %a1) #0 {
;
; CHECK-LABEL: @test_kxnor(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1>
@@ -11937,7 +11937,7 @@ define i16 @test_kxor(i16 %a0, i16 %a1) #0 {
;
; CHECK-LABEL: @test_kxor(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1>
@@ -11966,9 +11966,9 @@ define i32 @test_kortestz(<8 x i64> %A, <8 x i64> %B, <8 x i64> %C, <8 x i64> %D
; CHECK-LABEL: @test_kortestz(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP0]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[A:%.*]] to <16 x i32>
@@ -12043,9 +12043,9 @@ define i32 @test_kortestc(<8 x i64> %A, <8 x i64> %B, <8 x i64> %C, <8 x i64> %D
; CHECK-LABEL: @test_kortestc(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP0]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[A:%.*]] to <16 x i32>
@@ -12118,7 +12118,7 @@ entry:
define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: @test_cmpps(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -12143,7 +12143,7 @@ declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i32,
define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: @test_cmppd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -12168,7 +12168,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i32, i
define <8 x i64> @test_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mul_epi32_rr(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -12199,9 +12199,9 @@ define <8 x i64> @test_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %pa
;
; CHECK-LABEL: @test_mul_epi32_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -12242,8 +12242,8 @@ define <8 x i64> @test_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) #
;
; CHECK-LABEL: @test_mul_epi32_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -12283,7 +12283,7 @@ define <8 x i64> @test_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) #
define <8 x i64> @test_mul_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mul_epi32_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -12326,10 +12326,10 @@ define <8 x i64> @test_mul_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
define <8 x i64> @test_mul_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mul_epi32_rmk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -12381,9 +12381,9 @@ define <8 x i64> @test_mul_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passT
define <8 x i64> @test_mul_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mul_epi32_rmkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -12435,8 +12435,8 @@ define <8 x i64> @test_mul_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 {
define <8 x i64> @test_mul_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra_param) #0 {
;
; CHECK-LABEL: @test_mul_epi32_rmb(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -12488,11 +12488,11 @@ define <8 x i64> @test_mul_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra
define <8 x i64> @test_mul_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param) #0 {
;
; CHECK-LABEL: @test_mul_epi32_rmbk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP33:%.*]], !prof [[PROF1]]
@@ -12553,10 +12553,10 @@ define <8 x i64> @test_mul_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %pass
define <8 x i64> @test_mul_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param) #0 {
;
; CHECK-LABEL: @test_mul_epi32_rmbkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP32:%.*]], !prof [[PROF1]]
@@ -12619,7 +12619,7 @@ declare <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32>, <16 x i32>)
define <8 x i64> @test_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mul_epu32_rr(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -12650,9 +12650,9 @@ define <8 x i64> @test_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %pa
;
; CHECK-LABEL: @test_mul_epu32_rrk(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -12693,8 +12693,8 @@ define <8 x i64> @test_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) #
;
; CHECK-LABEL: @test_mul_epu32_rrkz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64>
@@ -12734,7 +12734,7 @@ define <8 x i64> @test_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) #
define <8 x i64> @test_mul_epu32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
;
; CHECK-LABEL: @test_mul_epu32_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -12777,10 +12777,10 @@ define <8 x i64> @test_mul_epu32_rm(<16 x i32> %a, ptr %ptr_b) #0 {
define <8 x i64> @test_mul_epu32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mul_epu32_rmk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -12832,9 +12832,9 @@ define <8 x i64> @test_mul_epu32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passT
define <8 x i64> @test_mul_epu32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mul_epu32_rmkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -12886,8 +12886,8 @@ define <8 x i64> @test_mul_epu32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 {
define <8 x i64> @test_mul_epu32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra_param) #0 {
;
; CHECK-LABEL: @test_mul_epu32_rmb(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -12939,11 +12939,11 @@ define <8 x i64> @test_mul_epu32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra
define <8 x i64> @test_mul_epu32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param) #0 {
;
; CHECK-LABEL: @test_mul_epu32_rmbk(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP33:%.*]], !prof [[PROF1]]
@@ -13004,10 +13004,10 @@ define <8 x i64> @test_mul_epu32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %pass
define <8 x i64> @test_mul_epu32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param) #0 {
;
; CHECK-LABEL: @test_mul_epu32_rmbkz(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP32:%.*]], !prof [[PROF1]]
@@ -13070,7 +13070,7 @@ declare <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32>, <16 x i32>)
define <2 x double> @test_x86_avx512_mm_cvtu32_sd(<2 x double> %a, i32 %b)
;
; CHECK-LABEL: @test_x86_avx512_mm_cvtu32_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP1]] to i64
@@ -13187,7 +13187,7 @@ declare <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double>, <8 x i64
define <8 x double>@test_int_x86_avx512_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_df_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -13211,9 +13211,9 @@ define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8
;
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_df_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -13247,8 +13247,8 @@ define <8 x double>@test_int_x86_avx512_maskz_permvar_df_512(<8 x double> %x0, <
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_df_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -13282,7 +13282,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -13297,9 +13297,9 @@ define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64
;
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -13322,8 +13322,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_permvar_di_512(<8 x i64> %x0, <8 x i6
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -13347,7 +13347,7 @@ declare <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float>, <16 x i3
define <16 x float>@test_int_x86_avx512_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_sf_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -13371,9 +13371,9 @@ define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <1
;
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_sf_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -13407,8 +13407,8 @@ define <16 x float>@test_int_x86_avx512_maskz_permvar_sf_512(<16 x float> %x0, <
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_sf_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -13442,7 +13442,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_si_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -13457,9 +13457,9 @@ define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_si_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -13482,8 +13482,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_permvar_si_512(<16 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_si_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -13507,8 +13507,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pternlog_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -13535,9 +13535,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pternlog_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -13574,9 +13574,9 @@ define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pternlog_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -13612,8 +13612,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64>, <8 x i64>, <8
define <8 x i64>@test_int_x86_avx512_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pternlog_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -13640,9 +13640,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pternlog_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -13679,9 +13679,9 @@ define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i6
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pternlog_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -13716,10 +13716,10 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32>, <16 x i32>
define <16 x i32>@test_int_x86_avx512_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -13753,10 +13753,10 @@ define <16 x i32>@test_int_x86_avx512_vpermi2var_d_512(<16 x i32> %x0, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -13800,8 +13800,8 @@ declare <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double>, <8 x
define <8 x double>@test_int_x86_avx512_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i64> [[TMP8]] to <8 x i3>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -13828,9 +13828,9 @@ define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0,
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = trunc <8 x i64> [[TMP2]] to <8 x i3>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -13868,8 +13868,8 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = trunc <16 x i32> [[TMP8]] to <16 x i4>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -13896,9 +13896,9 @@ define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0,
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i32> [[TMP2]] to <16 x i4>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -13936,8 +13936,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64>, <8 x i64>, <
define <8 x i64>@test_int_x86_avx512_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X3:%.*]], <8 x i64> [[TMP3]])
@@ -13960,9 +13960,9 @@ define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = trunc <8 x i64> [[TMP2]] to <8 x i3>
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X1:%.*]], <8 x i64> [[TMP3]])
@@ -13994,10 +13994,10 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, i16 %x3) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -14041,11 +14041,11 @@ declare <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64>, <8 x do
define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, ptr %x2ptr, i8 %x3, <8 x double> %extra_param) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[X0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
@@ -14099,10 +14099,10 @@ declare <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32>, <16 x
define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -14139,10 +14139,10 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = trunc <8 x i64> [[X0]] to <8 x i3>
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X4:%.*]], <8 x i64> [[TMP3]])
@@ -14173,8 +14173,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32>, <16 x i32>
define <16 x i32>@test_int_x86_avx512_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermt2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4>
@@ -14197,10 +14197,10 @@ define <16 x i32>@test_int_x86_avx512_vpermt2var_d_512(<16 x i32> %x0, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermt2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4>
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> [[TMP1]], <16 x i32> [[X4:%.*]], <16 x i32> [[TMP3]])
@@ -14234,7 +14234,7 @@ declare <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double>, <8 x double>
define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vsubps_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -14258,7 +14258,7 @@ define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vsubps_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -14282,7 +14282,7 @@ define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vsubps_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -14306,7 +14306,7 @@ define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vsubps_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -14330,7 +14330,7 @@ define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vmulps_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -14354,7 +14354,7 @@ define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vmulps_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -14378,7 +14378,7 @@ define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vmulps_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -14402,7 +14402,7 @@ define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vmulps_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -14428,8 +14428,8 @@ define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16
;
; CHECK-LABEL: @test_vmulps_mask_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14463,8 +14463,8 @@ define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16
;
; CHECK-LABEL: @test_vmulps_mask_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14498,8 +14498,8 @@ define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16
;
; CHECK-LABEL: @test_vmulps_mask_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14533,8 +14533,8 @@ define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16
;
; CHECK-LABEL: @test_vmulps_mask_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14569,9 +14569,9 @@ define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float>
;
; CHECK-LABEL: @test_vmulps_mask_passthru_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -14606,9 +14606,9 @@ define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float>
;
; CHECK-LABEL: @test_vmulps_mask_passthru_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -14643,9 +14643,9 @@ define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float>
;
; CHECK-LABEL: @test_vmulps_mask_passthru_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -14680,9 +14680,9 @@ define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float>
;
; CHECK-LABEL: @test_vmulps_mask_passthru_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -14718,8 +14718,8 @@ define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8
;
; CHECK-LABEL: @test_vmulpd_mask_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14753,8 +14753,8 @@ define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8
;
; CHECK-LABEL: @test_vmulpd_mask_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14788,8 +14788,8 @@ define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8
;
; CHECK-LABEL: @test_vmulpd_mask_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14823,8 +14823,8 @@ define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8
;
; CHECK-LABEL: @test_vmulpd_mask_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14858,8 +14858,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14891,8 +14891,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14924,8 +14924,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14958,8 +14958,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -14993,8 +14993,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -15027,9 +15027,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_add_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15062,9 +15062,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_add_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15097,9 +15097,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_add_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15133,9 +15133,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_add_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15170,9 +15170,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_mask_add_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15206,7 +15206,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15228,7 +15228,7 @@ define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15250,7 +15250,7 @@ define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15273,7 +15273,7 @@ define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15296,7 +15296,7 @@ define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15321,9 +15321,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15356,9 +15356,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15391,9 +15391,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15427,9 +15427,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15464,9 +15464,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15499,7 +15499,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15521,7 +15521,7 @@ define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15543,7 +15543,7 @@ define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15566,7 +15566,7 @@ define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15589,7 +15589,7 @@ define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15613,8 +15613,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -15646,8 +15646,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -15679,8 +15679,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -15713,8 +15713,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -15748,8 +15748,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -15782,9 +15782,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_div_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15817,9 +15817,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_div_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15852,9 +15852,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_div_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15888,9 +15888,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x
;
; CHECK-LABEL: @test_mm512_mask_div_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15925,9 +15925,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_mask_div_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -15961,7 +15961,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -15983,7 +15983,7 @@ define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -16005,7 +16005,7 @@ define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -16028,7 +16028,7 @@ define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -16051,7 +16051,7 @@ define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -16075,9 +16075,9 @@ declare <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float>, <16 x float>
define void @test_mask_compress_store_pd_512(ptr %addr, <8 x double> %data, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_compress_store_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -16107,7 +16107,7 @@ define void @test_compress_store_pd_512(ptr %addr, <8 x double> %data) #0 {
;
; CHECK-LABEL: @test_compress_store_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16129,9 +16129,9 @@ define void @test_compress_store_pd_512(ptr %addr, <8 x double> %data) #0 {
define void @test_mask_compress_store_ps_512(ptr %addr, <16 x float> %data, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_compress_store_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -16161,7 +16161,7 @@ define void @test_compress_store_ps_512(ptr %addr, <16 x float> %data) #0 {
;
; CHECK-LABEL: @test_compress_store_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16183,9 +16183,9 @@ define void @test_compress_store_ps_512(ptr %addr, <16 x float> %data) #0 {
define void @test_mask_compress_store_q_512(ptr %addr, <8 x i64> %data, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_compress_store_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -16215,7 +16215,7 @@ define void @test_compress_store_q_512(ptr %addr, <8 x i64> %data) #0 {
;
; CHECK-LABEL: @test_compress_store_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16237,9 +16237,9 @@ define void @test_compress_store_q_512(ptr %addr, <8 x i64> %data) #0 {
define void @test_mask_compress_store_d_512(ptr %addr, <16 x i32> %data, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_compress_store_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -16269,7 +16269,7 @@ define void @test_compress_store_d_512(ptr %addr, <16 x i32> %data) #0 {
;
; CHECK-LABEL: @test_compress_store_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16291,9 +16291,9 @@ define void @test_compress_store_d_512(ptr %addr, <16 x i32> %data) #0 {
define <8 x double> @test_mask_expand_load_pd_512(ptr %addr, <8 x double> %data, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_expand_load_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -16321,7 +16321,7 @@ define <8 x double> @test_mask_expand_load_pd_512(ptr %addr, <8 x double> %data,
define <8 x double> @test_maskz_expand_load_pd_512(ptr %addr, i8 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_expand_load_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -16353,7 +16353,7 @@ define <8 x double> @test_expand_load_pd_512(ptr %addr, <8 x double> %data) #0
;
; CHECK-LABEL: @test_expand_load_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16377,7 +16377,7 @@ define <8 x double> @test_expand_load_pd_512(ptr %addr, <8 x double> %data) #0
define <8 x double> @test_zero_mask_expand_load_pd_512(ptr %addr, <8 x double> %data, i8 %mask) #0 {
; CHECK-LABEL: @test_zero_mask_expand_load_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16400,9 +16400,9 @@ define <8 x double> @test_zero_mask_expand_load_pd_512(ptr %addr, <8 x double> %
define <16 x float> @test_mask_expand_load_ps_512(ptr %addr, <16 x float> %data, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_expand_load_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -16430,7 +16430,7 @@ define <16 x float> @test_mask_expand_load_ps_512(ptr %addr, <16 x float> %data,
define <16 x float> @test_maskz_expand_load_ps_512(ptr %addr, i16 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_expand_load_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -16462,7 +16462,7 @@ define <16 x float> @test_expand_load_ps_512(ptr %addr, <16 x float> %data) #0
;
; CHECK-LABEL: @test_expand_load_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16485,9 +16485,9 @@ define <16 x float> @test_expand_load_ps_512(ptr %addr, <16 x float> %data) #0
define <8 x i64> @test_mask_expand_load_q_512(ptr %addr, <8 x i64> %data, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_expand_load_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -16515,7 +16515,7 @@ define <8 x i64> @test_mask_expand_load_q_512(ptr %addr, <8 x i64> %data, i8 %ma
define <8 x i64> @test_maskz_expand_load_q_512(ptr %addr, i8 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_expand_load_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -16547,7 +16547,7 @@ define <8 x i64> @test_expand_load_q_512(ptr %addr, <8 x i64> %data) #0 {
;
; CHECK-LABEL: @test_expand_load_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16570,9 +16570,9 @@ define <8 x i64> @test_expand_load_q_512(ptr %addr, <8 x i64> %data) #0 {
define <16 x i32> @test_mask_expand_load_d_512(ptr %addr, <16 x i32> %data, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_expand_load_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -16600,7 +16600,7 @@ define <16 x i32> @test_mask_expand_load_d_512(ptr %addr, <16 x i32> %data, i16
define <16 x i32> @test_maskz_expand_load_d_512(ptr %addr, i16 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_expand_load_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -16632,7 +16632,7 @@ define <16 x i32> @test_expand_load_d_512(ptr %addr, <16 x i32> %data) #0 {
;
; CHECK-LABEL: @test_expand_load_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -16656,8 +16656,8 @@ define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x f
;
; CHECK-LABEL: @test_mm512_maskz_min_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16682,8 +16682,8 @@ define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_min_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16708,9 +16708,9 @@ define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x fl
;
; CHECK-LABEL: @test_mm512_mask_min_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16736,9 +16736,9 @@ define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_mask_min_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16763,7 +16763,7 @@ define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_min_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16778,7 +16778,7 @@ define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float>
define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_min_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16795,8 +16795,8 @@ define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x f
;
; CHECK-LABEL: @test_mm512_maskz_max_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16821,8 +16821,8 @@ define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_maskz_max_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16847,9 +16847,9 @@ define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x fl
;
; CHECK-LABEL: @test_mm512_mask_max_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16875,9 +16875,9 @@ define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16
;
; CHECK-LABEL: @test_mm512_mask_max_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16902,7 +16902,7 @@ define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_max_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16917,7 +16917,7 @@ define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float>
define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_max_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -16945,8 +16945,8 @@ define <8 x double> @test_mask_sqrt_pd_512(<8 x double> %a0, <8 x double> %passt
;
; CHECK-LABEL: @test_mask_sqrt_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x double> @llvm.sqrt.v8f64(<8 x double> [[A0:%.*]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
@@ -16969,7 +16969,7 @@ define <8 x double> @test_maskz_sqrt_pd_512(<8 x double> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_sqrt_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x double> @llvm.sqrt.v8f64(<8 x double> [[A0:%.*]])
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
@@ -17009,8 +17009,8 @@ define <8 x double> @test_mask_sqrt_round_pd_512(<8 x double> %a0, <8 x double>
;
; CHECK-LABEL: @test_mask_sqrt_round_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -17040,7 +17040,7 @@ define <8 x double> @test_maskz_sqrt_round_pd_512(<8 x double> %a0, i8 %mask) #
;
; CHECK-LABEL: @test_maskz_sqrt_round_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -17082,8 +17082,8 @@ define <16 x float> @test_mask_sqrt_ps_512(<16 x float> %a0, <16 x float> %passt
;
; CHECK-LABEL: @test_mask_sqrt_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[A0:%.*]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
@@ -17106,7 +17106,7 @@ define <16 x float> @test_maskz_sqrt_ps_512(<16 x float> %a0, i16 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_sqrt_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[A0:%.*]])
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
@@ -17146,8 +17146,8 @@ define <16 x float> @test_mask_sqrt_round_ps_512(<16 x float> %a0, <16 x float>
;
; CHECK-LABEL: @test_mask_sqrt_round_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -17177,7 +17177,7 @@ define <16 x float> @test_maskz_sqrt_round_ps_512(<16 x float> %a0, i16 %mask)
;
; CHECK-LABEL: @test_maskz_sqrt_round_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -17209,7 +17209,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_prolv_d_512_old(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_prolv_d_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -17227,9 +17227,9 @@ define <16 x i32>@test_int_x86_avx512_mask_prolv_d_512_old(<16 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prolv_d_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -17255,8 +17255,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_prolv_d_512_old(<16 x i32> %x0, <16
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_prolv_d_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -17283,7 +17283,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_prolv_q_512_old(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_prolv_q_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -17301,9 +17301,9 @@ define <8 x i64>@test_int_x86_avx512_mask_prolv_q_512_old(<8 x i64> %x0, <8 x i6
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prolv_q_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -17329,8 +17329,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_prolv_q_512_old(<8 x i64> %x0, <8 x i
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_prolv_q_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -17357,7 +17357,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_prorv_d_512_old(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_prorv_d_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -17375,9 +17375,9 @@ define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512_old(<16 x i32> %x0, <16 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prorv_d_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -17403,8 +17403,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_prorv_d_512_old(<16 x i32> %x0, <16
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_prorv_d_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -17431,7 +17431,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_prorv_q_512_old(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_prorv_q_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -17449,9 +17449,9 @@ define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512_old(<8 x i64> %x0, <8 x i6
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prorv_q_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -17477,8 +17477,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_prorv_q_512_old(<8 x i64> %x0, <8 x i
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_prorv_q_512_old(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -17506,8 +17506,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_prol_d_5
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prol_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> splat (i32 3))
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -17558,8 +17558,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_prol_q_512(
;
; CHECK-LABEL: @test_int_x86_avx512_mask_prol_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> splat (i64 3))
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -17610,8 +17610,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_pror_d_5
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pror_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> splat (i32 3))
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -17662,8 +17662,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_pror_q_512(
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pror_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> splat (i64 3))
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -17714,9 +17714,9 @@ define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x do
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0
@@ -17812,9 +17812,9 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x floa
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0
@@ -17910,9 +17910,9 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x d
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0
@@ -17983,9 +17983,9 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x flo
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0
@@ -18055,9 +18055,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x d
;
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0
@@ -18153,9 +18153,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x flo
;
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0
@@ -18249,9 +18249,9 @@ define void @fmadd_ss_mask_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_par
;
; CHECK-LABEL: @fmadd_ss_mask_memfold(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
@@ -18352,9 +18352,9 @@ define void @fmadd_ss_maskz_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_pa
;
; CHECK-LABEL: @fmadd_ss_maskz_memfold(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
@@ -18454,9 +18454,9 @@ define void @fmadd_sd_mask_memfold(ptr %a, ptr %b, i8 %c, <2 x double> %extra_pa
;
; CHECK-LABEL: @fmadd_sd_mask_memfold(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
@@ -18545,9 +18545,9 @@ define void @fmadd_sd_maskz_memfold(ptr %a, ptr %b, i8 %c, <2 x double> %extra_p
;
; CHECK-LABEL: @fmadd_sd_maskz_memfold(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
@@ -18636,10 +18636,10 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmsub_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = fneg <2 x double> [[X2:%.*]]
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP2]], i64 0
@@ -18743,10 +18743,10 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmsub_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = fneg <4 x float> [[X2:%.*]]
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP2]], i64 0
@@ -18851,9 +18851,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x
;
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfnmsub_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = fneg <2 x double> [[X0:%.*]]
; CHECK-NEXT: [[TMP6:%.*]] = fneg <2 x double> [[X2:%.*]]
@@ -18961,9 +18961,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl
;
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfnmsub_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = fneg <4 x float> [[X0:%.*]]
; CHECK-NEXT: [[TMP6:%.*]] = fneg <4 x float> [[X2:%.*]]
@@ -19068,11 +19068,11 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_ss_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP25:%.*]], !prof [[PROF1]]
@@ -19122,11 +19122,11 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 {
;
; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_ss_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP25:%.*]], !prof [[PROF1]]
@@ -19176,10 +19176,10 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_ss_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP20:%.*]], !prof [[PROF1]]
@@ -19240,8 +19240,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %
;
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[X0:%.*]] to <8 x i32>
@@ -19264,7 +19264,7 @@ define <8 x i32>@test_int_x86_avx512_maskz_pmov_qd_512(<8 x i64> %x0, i8 %x2) #
;
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmov_qd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[X0:%.*]] to <8 x i32>
@@ -19289,8 +19289,8 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_dq2ps_512(<16 x i32> %x0, <16
;
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_dq2ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[CVT:%.*]] = sitofp <16 x i32> [[X0:%.*]] to <16 x float>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
@@ -19328,8 +19328,8 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16
;
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_udq2ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[CVT:%.*]] = uitofp <16 x i32> [[X0:%.*]] to <16 x float>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
@@ -19364,9 +19364,9 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16
define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_compress_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -19394,7 +19394,7 @@ define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double>
define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_compress_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -19420,7 +19420,7 @@ define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) #
define <8 x double> @test_compress_pd_512(<8 x double> %data, <8 x double> %extra_param) #0 {
; CHECK-LABEL: @test_compress_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -19445,9 +19445,9 @@ declare <8 x double> @llvm.x86.avx512.mask.compress.pd.512(<8 x double> %data, <
define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_compress_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -19475,7 +19475,7 @@ define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float>
define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_compress_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -19501,7 +19501,7 @@ define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask)
define <16 x float> @test_compress_ps_512(<16 x float> %data, <16 x float> %extra_param) #0 {
; CHECK-LABEL: @test_compress_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -19526,9 +19526,9 @@ declare <16 x float> @llvm.x86.avx512.mask.compress.ps.512(<16 x float> %data, <
define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_compress_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -19556,7 +19556,7 @@ define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru,
define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_compress_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -19582,7 +19582,7 @@ define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) #0 {
define <8 x i64> @test_compress_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 {
; CHECK-LABEL: @test_compress_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -19607,9 +19607,9 @@ declare <8 x i64> @llvm.x86.avx512.mask.compress.q.512(<8 x i64> %data, <8 x i64
define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_compress_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -19637,7 +19637,7 @@ define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passth
define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_compress_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -19663,7 +19663,7 @@ define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) #0 {
define <16 x i32> @test_compress_d_512(<16 x i32> %data, <16 x i32> %extra_param) #0 {
; CHECK-LABEL: @test_compress_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -19688,7 +19688,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.compress.d.512(<16 x i32> %data, <16 x
define <8 x double> @test_expand_pd_512(<8 x double> %data, <8 x double> %extra_param) #0 {
; CHECK-LABEL: @test_expand_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -19711,9 +19711,9 @@ define <8 x double> @test_expand_pd_512(<8 x double> %data, <8 x double> %extra_
define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_expand_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -19741,7 +19741,7 @@ define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %p
define <8 x double> @test_maskz_expand_pd_512(<8 x double> %data, i8 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_expand_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -19769,7 +19769,7 @@ declare <8 x double> @llvm.x86.avx512.mask.expand.pd.512(<8 x double> %data, <8
define <16 x float> @test_expand_ps_512(<16 x float> %data, <16 x float> %extra_param) #0 {
; CHECK-LABEL: @test_expand_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -19792,9 +19792,9 @@ define <16 x float> @test_expand_ps_512(<16 x float> %data, <16 x float> %extra_
define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_expand_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -19822,7 +19822,7 @@ define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %p
define <16 x float> @test_maskz_expand_ps_512(<16 x float> %data, i16 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_expand_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -19850,7 +19850,7 @@ declare <16 x float> @llvm.x86.avx512.mask.expand.ps.512(<16 x float> %data, <16
define <8 x i64> @test_expand_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 {
; CHECK-LABEL: @test_expand_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -19873,9 +19873,9 @@ define <8 x i64> @test_expand_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0
define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) #0 {
;
; CHECK-LABEL: @test_mask_expand_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -19903,7 +19903,7 @@ define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i
define <8 x i64> @test_maskz_expand_q_512(<8 x i64> %data, i8 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_expand_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -19931,7 +19931,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.expand.q.512(<8 x i64> %data, <8 x i64>
define <16 x i32> @test_expand_d_512(<16 x i32> %data, <16 x i32> %extra_param) #0 {
; CHECK-LABEL: @test_expand_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -19954,9 +19954,9 @@ define <16 x i32> @test_expand_d_512(<16 x i32> %data, <16 x i32> %extra_param)
define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) #0 {
;
; CHECK-LABEL: @test_mask_expand_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -19984,7 +19984,7 @@ define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru
define <16 x i32> @test_maskz_expand_d_512(<16 x i32> %data, i16 %mask) #0 {
;
; CHECK-LABEL: @test_maskz_expand_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -20014,10 +20014,10 @@ define <16 x float> @test_cmp_512(<16 x float> %a, <16 x float> %b, <16 x float>
; CHECK-LABEL: @test_cmp_512(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP0]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll
index d8f204f..cc022e9 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll
@@ -46,9 +46,9 @@ target triple = "x86_64-unknown-linux-gnu"
define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_compress_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -76,7 +76,7 @@ define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double>
define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_compress_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -103,7 +103,7 @@ define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) #0
define <8 x double> @test_compress_pd_512(<8 x double> %data, <8 x double> %extra_param) #0 {
; CHECK-LABEL: @test_compress_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -125,9 +125,9 @@ define <8 x double> @test_compress_pd_512(<8 x double> %data, <8 x double> %extr
define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_mask_compress_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -155,7 +155,7 @@ define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float>
define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) #0 {
; CHECK-LABEL: @test_maskz_compress_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -182,7 +182,7 @@ define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) #
define <16 x float> @test_compress_ps_512(<16 x float> %data, <16 x float> %extra_param) #0 {
; CHECK-LABEL: @test_compress_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -204,9 +204,9 @@ define <16 x float> @test_compress_ps_512(<16 x float> %data, <16 x float> %extr
define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_compress_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -234,7 +234,7 @@ define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru,
define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_compress_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -261,7 +261,7 @@ define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) #0 {
define <8 x i64> @test_compress_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 {
; CHECK-LABEL: @test_compress_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -283,9 +283,9 @@ define <8 x i64> @test_compress_q_512(<8 x i64> %data, <8 x i64> %extra_param) #
define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_mask_compress_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -313,7 +313,7 @@ define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passth
define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) #0 {
; CHECK-LABEL: @test_maskz_compress_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -340,7 +340,7 @@ define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) #0 {
define <16 x i32> @test_compress_d_512(<16 x i32> %data, <16 x i32> %extra_param) #0 {
; CHECK-LABEL: @test_compress_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -363,7 +363,7 @@ define <16 x i32> @test_compress_d_512(<16 x i32> %data, <16 x i32> %extra_param
define <8 x double> @test_expand_pd_512(<8 x double> %data, <8 x double> %extra_param) #0 {
; CHECK-LABEL: @test_expand_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -385,9 +385,9 @@ define <8 x double> @test_expand_pd_512(<8 x double> %data, <8 x double> %extra_
define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_expand_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -415,7 +415,7 @@ define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %p
define <8 x double> @test_maskz_expand_pd_512(<8 x double> %data, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_expand_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -442,7 +442,7 @@ define <8 x double> @test_maskz_expand_pd_512(<8 x double> %data, i8 %mask) #0 {
define <16 x float> @test_expand_ps_512(<16 x float> %data, <16 x float> %extra_param) #0 {
; CHECK-LABEL: @test_expand_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -464,9 +464,9 @@ define <16 x float> @test_expand_ps_512(<16 x float> %data, <16 x float> %extra_
define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_mask_expand_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -494,7 +494,7 @@ define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %p
define <16 x float> @test_maskz_expand_ps_512(<16 x float> %data, i16 %mask) #0 {
; CHECK-LABEL: @test_maskz_expand_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -521,7 +521,7 @@ define <16 x float> @test_maskz_expand_ps_512(<16 x float> %data, i16 %mask) #0
define <8 x i64> @test_expand_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 {
; CHECK-LABEL: @test_expand_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -543,9 +543,9 @@ define <8 x i64> @test_expand_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0
define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_expand_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
@@ -573,7 +573,7 @@ define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i
define <8 x i64> @test_maskz_expand_q_512(<8 x i64> %data, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_expand_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -600,7 +600,7 @@ define <8 x i64> @test_maskz_expand_q_512(<8 x i64> %data, i8 %mask) #0 {
define <16 x i32> @test_expand_d_512(<16 x i32> %data, <16 x i32> %extra_param) #0 {
; CHECK-LABEL: @test_expand_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -622,9 +622,9 @@ define <16 x i32> @test_expand_d_512(<16 x i32> %data, <16 x i32> %extra_param)
define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_mask_expand_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
@@ -652,7 +652,7 @@ define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru
define <16 x i32> @test_maskz_expand_d_512(<16 x i32> %data, i16 %mask) #0 {
; CHECK-LABEL: @test_maskz_expand_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -713,8 +713,8 @@ declare <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double>, <2 x double
define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b, <2 x double> %extra_param) #0 {
; CHECK-LABEL: @test_rndscale_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -740,9 +740,9 @@ define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b, <2 x dou
define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) #0 {
; CHECK-LABEL: @test_rndscale_sd_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -769,10 +769,10 @@ define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2
define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, ptr %bptr, <2 x double> %c, i8 %mask) #0 {
; CHECK-LABEL: @test_rndscale_sd_mask_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -812,8 +812,8 @@ define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, ptr %bptr, <2 x
define <2 x double> @test_rndscale_sd_maskz(<2 x double> %a, <2 x double> %b, i8 %mask) #0 {
; CHECK-LABEL: @test_rndscale_sd_maskz(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -840,8 +840,8 @@ declare <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float>, <4 x float>,
define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_rndscale_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -866,9 +866,9 @@ define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b, <4 x float>
define <4 x float> @test_rndscale_ss_load(<4 x float> %a, ptr %bptr, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_rndscale_ss_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
@@ -906,9 +906,9 @@ define <4 x float> @test_rndscale_ss_load(<4 x float> %a, ptr %bptr, <4 x float>
define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) #0 {
; CHECK-LABEL: @test_rndscale_ss_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -936,8 +936,8 @@ define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x f
define <4 x float> @test_rndscale_ss_maskz(<4 x float> %a, <4 x float> %b, i8 %mask) #0 {
; CHECK-LABEL: @test_rndscale_ss_maskz(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1024,8 +1024,8 @@ define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) #0 {
define <8 x double> @test_mask_sqrt_pd_512(<8 x double> %a0, <8 x double> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_sqrt_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x double> @llvm.sqrt.v8f64(<8 x double> [[A0:%.*]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
@@ -1050,7 +1050,7 @@ define <8 x double> @test_mask_sqrt_pd_512(<8 x double> %a0, <8 x double> %passt
define <8 x double> @test_maskz_sqrt_pd_512(<8 x double> %a0, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_sqrt_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x double> @llvm.sqrt.v8f64(<8 x double> [[A0:%.*]])
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
@@ -1094,8 +1094,8 @@ define <8 x double> @test_sqrt_round_pd_512(<8 x double> %a0) #0 {
define <8 x double> @test_mask_sqrt_round_pd_512(<8 x double> %a0, <8 x double> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_sqrt_round_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -1127,7 +1127,7 @@ define <8 x double> @test_mask_sqrt_round_pd_512(<8 x double> %a0, <8 x double>
define <8 x double> @test_maskz_sqrt_round_pd_512(<8 x double> %a0, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_sqrt_round_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -1171,8 +1171,8 @@ define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) #0 {
define <16 x float> @test_mask_sqrt_ps_512(<16 x float> %a0, <16 x float> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_mask_sqrt_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[A0:%.*]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
@@ -1197,7 +1197,7 @@ define <16 x float> @test_mask_sqrt_ps_512(<16 x float> %a0, <16 x float> %passt
define <16 x float> @test_maskz_sqrt_ps_512(<16 x float> %a0, i16 %mask) #0 {
; CHECK-LABEL: @test_maskz_sqrt_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[A0:%.*]])
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
@@ -1241,8 +1241,8 @@ define <16 x float> @test_sqrt_round_ps_512(<16 x float> %a0) #0 {
define <16 x float> @test_mask_sqrt_round_ps_512(<16 x float> %a0, <16 x float> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_mask_sqrt_round_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -1274,7 +1274,7 @@ define <16 x float> @test_mask_sqrt_round_ps_512(<16 x float> %a0, <16 x float>
define <16 x float> @test_maskz_sqrt_round_ps_512(<16 x float> %a0, i16 %mask) #0 {
; CHECK-LABEL: @test_maskz_sqrt_round_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -1385,9 +1385,9 @@ declare <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>, <4 x float>, <4 x
define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_sqrt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1467,9 +1467,9 @@ declare <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>, <2 x double>, <
define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_sqrt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1870,9 +1870,9 @@ declare i32 @llvm.x86.avx512.vcvtss2si32(<4 x float>, i32) nounwind readnone
define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16 %mask, ptr %dst) #0 {
; CHECK-LABEL: @test_x86_vcvtps2ph_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = sext <16 x i1> [[TMP6]] to <16 x i16>
@@ -1929,7 +1929,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float>, i32, <16 x
define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: @test_cmpps(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -1955,7 +1955,7 @@ declare <16 x i1> @llvm.x86.avx512.mask.cmp.ps.512(<16 x float>, <16 x float>, i
define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: @test_cmppd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -1983,7 +1983,7 @@ declare <8 x i1> @llvm.x86.avx512.mask.cmp.pd.512(<8 x double>, <8 x double>, i3
define <8 x double> @test_vmaxpd(<8 x double> %a0, <8 x double> %a1) #0 {
; CHECK-LABEL: @test_vmaxpd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i64> [[_MSPROP]], zeroinitializer
@@ -1999,7 +1999,7 @@ declare <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double>, <8 x double>, i32
define <8 x double> @test_vminpd(<8 x double> %a0, <8 x double> %a1) #0 {
; CHECK-LABEL: @test_vminpd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i64> [[_MSPROP]], zeroinitializer
@@ -2014,8 +2014,8 @@ declare <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double>, <8 x double>, i32
define void @test_mask_store_ss(ptr %ptr, <4 x float> %data, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_store_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP1]], 0
@@ -2060,7 +2060,7 @@ declare <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double>, <8 x double>, i32
define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vsubps_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -2083,7 +2083,7 @@ define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vsubps_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -2106,7 +2106,7 @@ define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vsubps_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -2129,7 +2129,7 @@ define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vsubps_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -2152,7 +2152,7 @@ define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vmulps_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -2175,7 +2175,7 @@ define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vmulps_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -2198,7 +2198,7 @@ define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vmulps_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -2221,7 +2221,7 @@ define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: @test_vmulps_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -2244,8 +2244,8 @@ define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) #0 {
define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_vmulps_mask_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2279,8 +2279,8 @@ define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16
define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_vmulps_mask_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2314,8 +2314,8 @@ define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16
define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_vmulps_mask_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2349,8 +2349,8 @@ define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16
define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_vmulps_mask_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2384,9 +2384,9 @@ define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16
define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_vmulps_mask_passthru_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2421,9 +2421,9 @@ define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float>
define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_vmulps_mask_passthru_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2458,9 +2458,9 @@ define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float>
define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_vmulps_mask_passthru_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2495,9 +2495,9 @@ define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float>
define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_vmulps_mask_passthru_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2532,8 +2532,8 @@ define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float>
define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_vmulpd_mask_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2567,8 +2567,8 @@ define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8
define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_vmulpd_mask_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2602,8 +2602,8 @@ define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8
define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_vmulpd_mask_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2637,8 +2637,8 @@ define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8
define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_vmulpd_mask_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2672,8 +2672,8 @@ define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8
define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2707,8 +2707,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2742,8 +2742,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2777,8 +2777,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2812,8 +2812,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_add_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -2847,9 +2847,9 @@ define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_add_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2884,9 +2884,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_add_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2921,9 +2921,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_add_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2958,9 +2958,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_add_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2995,9 +2995,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_add_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3032,7 +3032,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3055,7 +3055,7 @@ define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3078,7 +3078,7 @@ define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3101,7 +3101,7 @@ define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3124,7 +3124,7 @@ define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_add_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3148,9 +3148,9 @@ declare <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float>, <16 x float>, i32
define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3185,9 +3185,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3222,9 +3222,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3259,9 +3259,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3296,9 +3296,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_sub_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3333,7 +3333,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3356,7 +3356,7 @@ define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3379,7 +3379,7 @@ define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3402,7 +3402,7 @@ define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3425,7 +3425,7 @@ define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_sub_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3448,8 +3448,8 @@ define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x flo
define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -3483,8 +3483,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -3518,8 +3518,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -3553,8 +3553,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -3588,8 +3588,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16
define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_div_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -3623,9 +3623,9 @@ define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_div_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3660,9 +3660,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_div_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3697,9 +3697,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_div_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3734,9 +3734,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_div_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3771,9 +3771,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x
define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_div_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -3808,7 +3808,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_rn_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3831,7 +3831,7 @@ define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_rd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3854,7 +3854,7 @@ define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_ru_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3877,7 +3877,7 @@ define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_rz_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3900,7 +3900,7 @@ define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x floa
define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_div_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -3924,8 +3924,8 @@ declare <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float>, <16 x float>, i32
define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_min_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -3951,8 +3951,8 @@ define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x f
define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_min_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -3978,9 +3978,9 @@ define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_min_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4007,9 +4007,9 @@ define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x fl
define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_min_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4036,7 +4036,7 @@ define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_min_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4051,7 +4051,7 @@ define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float>
define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_min_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4067,8 +4067,8 @@ declare <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float>, <16 x float>, i32
define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_max_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4094,8 +4094,8 @@ define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x f
define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_maskz_max_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4121,9 +4121,9 @@ define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_max_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4150,9 +4150,9 @@ define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x fl
define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_mask_max_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4179,7 +4179,7 @@ define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16
define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_max_round_ps_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4194,7 +4194,7 @@ define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float>
define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_mm512_max_round_ps_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -4212,9 +4212,9 @@ declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_ss_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4242,9 +4242,9 @@ define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_ss_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4272,9 +4272,9 @@ define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_ss_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4302,9 +4302,9 @@ define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_ss_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4332,9 +4332,9 @@ define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_ss_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4362,8 +4362,8 @@ define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <
define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_add_ss_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -4388,7 +4388,7 @@ define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %m
define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_add_ss_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -4410,11 +4410,11 @@ define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) #0 {
define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, ptr %a1, <4 x float> %a2, i8 %mask, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_mask_add_ss_current_memfold(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -4465,10 +4465,10 @@ define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, ptr %a1, <
define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, ptr %a1, i8 %mask, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_maskz_add_ss_current_memfold(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
@@ -4519,9 +4519,9 @@ declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_sd_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4549,9 +4549,9 @@ define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_sd_rd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4579,9 +4579,9 @@ define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_sd_ru(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4609,9 +4609,9 @@ define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_sd_rz(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4639,9 +4639,9 @@ define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_add_sd_current(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4669,8 +4669,8 @@ define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1
define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_add_sd_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -4695,7 +4695,7 @@ define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8
define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_add_sd_rn(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -4717,11 +4717,11 @@ define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) #0 {
define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, ptr %a1, <2 x double> %a2, i8 %mask, <2 x double> %extra_param) #0 {
; CHECK-LABEL: @test_mask_add_sd_current_memfold(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -4766,10 +4766,10 @@ define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, ptr %a1,
define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, ptr %a1, i8 %mask, <2 x double> %extra_param) #0 {
; CHECK-LABEL: @test_maskz_add_sd_current_memfold(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
@@ -4814,9 +4814,9 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_max_ss_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4844,8 +4844,8 @@ define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x
define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_max_ss_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -4870,7 +4870,7 @@ define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %
define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_max_ss_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -4893,9 +4893,9 @@ define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) #0 {
define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_max_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -4923,8 +4923,8 @@ define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_max_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -4949,7 +4949,7 @@ define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask
define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_max_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -4971,11 +4971,11 @@ define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) #0 {
define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, ptr %a1, <4 x float> %a2, i8 %mask, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_mask_max_ss_memfold(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -5026,10 +5026,10 @@ define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, ptr %a1, <4 x floa
define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, ptr %a1, i8 %mask, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_maskz_max_ss_memfold(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
@@ -5079,9 +5079,9 @@ declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_max_sd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -5109,8 +5109,8 @@ define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_max_sd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -5135,7 +5135,7 @@ define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i
define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_max_sd_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -5158,9 +5158,9 @@ define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) #0 {
define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_max_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -5188,8 +5188,8 @@ define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_max_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -5214,7 +5214,7 @@ define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %m
define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_max_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -5236,11 +5236,11 @@ define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) #0 {
define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, ptr %a1, <2 x double> %a2, i8 %mask, <2 x double> %extra_param) #0 {
; CHECK-LABEL: @test_mask_max_sd_memfold(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
@@ -5285,10 +5285,10 @@ define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, ptr %a1, <2 x do
define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, ptr %a1, i8 %mask, <2 x double> %extra_param) #0 {
; CHECK-LABEL: @test_maskz_max_sd_memfold(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
@@ -5331,7 +5331,7 @@ define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, ptr %a1, i8 %ma
define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) #0 {
; CHECK-LABEL: @test_x86_avx512_cvtsi2ss32(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -5353,7 +5353,7 @@ declare <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float>, i32, i32) nounwind
define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b) #0 {
; CHECK-LABEL: @test_x86_avx512__mm_cvt_roundu32_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0
@@ -5373,7 +5373,7 @@ define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b)
define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, ptr %ptr) #0 {
; CHECK-LABEL: @test_x86_avx512__mm_cvt_roundu32_ss_mem(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -5405,7 +5405,7 @@ define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, ptr
define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b) #0 {
; CHECK-LABEL: @test_x86_avx512__mm_cvtu32_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0
@@ -5425,7 +5425,7 @@ define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b) #0 {
define <4 x float> @test_x86_avx512__mm_cvtu32_ss_mem(<4 x float> %a, ptr %ptr) #0 {
; CHECK-LABEL: @test_x86_avx512__mm_cvtu32_ss_mem(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -5460,9 +5460,9 @@ declare <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16
define <16 x i32>@test_int_x86_avx512_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -5495,10 +5495,10 @@ define <16 x i32>@test_int_x86_avx512_vpermi2var_d_512(<16 x i32> %x0, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -5544,8 +5544,8 @@ declare <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double>, <8 x i64>,
define <8 x double>@test_int_x86_avx512_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -5570,9 +5570,9 @@ define <8 x double>@test_int_x86_avx512_vpermi2var_pd_512(<8 x double> %x0, <8 x
define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = trunc <8 x i64> [[TMP2]] to <8 x i3>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -5613,8 +5613,8 @@ declare <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float>, <16 x i32>
define <16 x float>@test_int_x86_avx512_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -5639,9 +5639,9 @@ define <16 x float>@test_int_x86_avx512_vpermi2var_ps_512(<16 x float> %x0, <16
define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i32> [[TMP2]] to <16 x i4>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -5682,8 +5682,8 @@ declare <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i
define <8 x i64>@test_int_x86_avx512_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X3:%.*]], <8 x i64> [[TMP3]])
@@ -5705,9 +5705,9 @@ define <8 x i64>@test_int_x86_avx512_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %
define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = trunc <8 x i64> [[TMP2]] to <8 x i3>
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X1:%.*]], <8 x i64> [[TMP3]])
@@ -5738,10 +5738,10 @@ define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i
define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -5784,12 +5784,12 @@ define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16
define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, ptr %x2ptr, i8 %x3, <8 x double> %extra_param, <8 x double> %extra_param2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_pd_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[X0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
@@ -5842,10 +5842,10 @@ define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <
define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_ps_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -5880,10 +5880,10 @@ define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0,
define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_q_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = trunc <8 x i64> [[X0]] to <8 x i3>
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X4:%.*]], <8 x i64> [[TMP3]])
@@ -5914,8 +5914,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x
define <16 x i32>@test_int_x86_avx512_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermt2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4>
@@ -5937,10 +5937,10 @@ define <16 x i32>@test_int_x86_avx512_vpermt2var_d_512(<16 x i32> %x0, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermt2var_d_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4>
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> [[TMP1]], <16 x i32> [[X4:%.*]], <16 x i32> [[TMP3]])
@@ -5973,9 +5973,9 @@ declare <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double>, <8 x doub
define <8 x double>@test_int_x86_avx512_mask_scalef_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_scalef_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -6021,9 +6021,9 @@ declare <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float>, <16 x flo
define <16 x float>@test_int_x86_avx512_mask_scalef_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_scalef_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -6070,8 +6070,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> [[TMP1]], <16 x i8> [[TMP2]], i8 -1)
; CHECK-NEXT: [[_MSPROP2:%.*]] = or <16 x i8> zeroinitializer, [[TMP4]]
@@ -6106,8 +6106,8 @@ declare void @llvm.x86.avx512.mask.pmov.qb.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qb_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qb_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6143,8 +6143,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> [[TMP1]], <16 x i8> [[TMP2]], i8 -1)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i8> zeroinitializer, [[TMP4]]
@@ -6179,8 +6179,8 @@ declare void @llvm.x86.avx512.mask.pmovs.qb.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qb_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qb_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6216,8 +6216,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> [[TMP1]], <16 x i8> [[TMP2]], i8 -1)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i8> zeroinitializer, [[TMP4]]
@@ -6252,8 +6252,8 @@ declare void @llvm.x86.avx512.mask.pmovus.qb.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qb_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qb_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6289,8 +6289,8 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> [[TMP1]], <8 x i16> [[TMP2]], i8 -1)
; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i16> zeroinitializer, [[TMP8]]
@@ -6325,8 +6325,8 @@ declare void @llvm.x86.avx512.mask.pmov.qw.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qw_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qw_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6362,8 +6362,8 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> [[TMP1]], <8 x i16> [[TMP2]], i8 -1)
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i16> zeroinitializer, [[TMP11]]
@@ -6398,8 +6398,8 @@ declare void @llvm.x86.avx512.mask.pmovs.qw.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qw_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qw_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6435,8 +6435,8 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> [[TMP1]], <8 x i16> [[TMP2]], i8 -1)
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i16> zeroinitializer, [[TMP11]]
@@ -6471,8 +6471,8 @@ declare void @llvm.x86.avx512.mask.pmovus.qw.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qw_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qw_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6519,8 +6519,8 @@ define <8 x i32>@test_int_x86_avx512_pmov_qd_512(<8 x i64> %x0, <8 x i32> %x1) #
define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[X0:%.*]] to <8 x i32>
@@ -6544,7 +6544,7 @@ define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %
define <8 x i32>@test_int_x86_avx512_maskz_pmov_qd_512(<8 x i64> %x0, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmov_qd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[X0:%.*]] to <8 x i32>
@@ -6570,8 +6570,8 @@ declare void @llvm.x86.avx512.mask.pmov.qd.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmov_qd_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qd_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6607,7 +6607,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmovs_qd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> splat (i1 true), <8 x i32> [[TMP3]], <8 x i32> [[TMP2]]
@@ -6621,9 +6621,9 @@ define <8 x i32>@test_int_x86_avx512_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1)
define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qd_512(
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
@@ -6644,7 +6644,7 @@ define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32>
define <8 x i32>@test_int_x86_avx512_maskz_pmovs_qd_512(<8 x i64> %x0, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovs_qd_512(
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP3:%.*]] to <8 x i1>
@@ -6669,8 +6669,8 @@ declare void @llvm.x86.avx512.mask.pmovs.qd.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovs_qd_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qd_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6706,7 +6706,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmovus_qd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> splat (i1 true), <8 x i32> [[TMP3]], <8 x i32> [[TMP2]]
@@ -6720,9 +6720,9 @@ define <8 x i32>@test_int_x86_avx512_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1)
define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qd_512(
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32>
@@ -6743,7 +6743,7 @@ define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32>
define <8 x i32>@test_int_x86_avx512_maskz_pmovus_qd_512(<8 x i64> %x0, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovus_qd_512(
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP3:%.*]] to <8 x i1>
@@ -6768,8 +6768,8 @@ declare void @llvm.x86.avx512.mask.pmovus.qd.mem.512(ptr %ptr, <8 x i64>, i8)
define void @test_int_x86_avx512_mask_pmovus_qd_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qd_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
@@ -6805,8 +6805,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pmov_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_db_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> [[TMP1]], <16 x i8> [[TMP2]], i16 -1)
; CHECK-NEXT: [[_MSPROP2:%.*]] = or <16 x i8> zeroinitializer, [[TMP8]]
@@ -6841,8 +6841,8 @@ declare void @llvm.x86.avx512.mask.pmov.db.mem.512(ptr %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmov_db_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_db_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512
@@ -6878,8 +6878,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_db_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> [[TMP1]], <16 x i8> [[TMP2]], i16 -1)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i8> zeroinitializer, [[TMP11]]
@@ -6914,8 +6914,8 @@ declare void @llvm.x86.avx512.mask.pmovs.db.mem.512(ptr %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmovs_db_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_db_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512
@@ -6951,8 +6951,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32>, <16 x i8>, i16
define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_db_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> [[TMP1]], <16 x i8> [[TMP2]], i16 -1)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i8> zeroinitializer, [[TMP11]]
@@ -6987,8 +6987,8 @@ declare void @llvm.x86.avx512.mask.pmovus.db.mem.512(ptr %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmovus_db_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_db_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512
@@ -7024,8 +7024,8 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32>, <16 x i16>, i16
define <16 x i16>@test_int_x86_avx512_mask_pmov_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_dw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> [[TMP1]], <16 x i16> [[TMP2]], i16 -1)
; CHECK-NEXT: [[_MSPROP2:%.*]] = or <16 x i16> zeroinitializer, [[TMP8]]
@@ -7060,8 +7060,8 @@ declare void @llvm.x86.avx512.mask.pmov.dw.mem.512(ptr %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmov_dw_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_dw_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512
@@ -7097,8 +7097,8 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32>, <16 x i16>, i1
define <16 x i16>@test_int_x86_avx512_mask_pmovs_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_dw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> [[TMP1]], <16 x i16> [[TMP2]], i16 -1)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i16> zeroinitializer, [[TMP11]]
@@ -7133,8 +7133,8 @@ declare void @llvm.x86.avx512.mask.pmovs.dw.mem.512(ptr %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmovs_dw_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_dw_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512
@@ -7170,8 +7170,8 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32>, <16 x i16>, i
define <16 x i16>@test_int_x86_avx512_mask_pmovus_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_dw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> [[TMP1]], <16 x i16> [[TMP2]], i16 -1)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i16> zeroinitializer, [[TMP11]]
@@ -7206,8 +7206,8 @@ declare void @llvm.x86.avx512.mask.pmovus.dw.mem.512(ptr %ptr, <16 x i32>, i16)
define void @test_int_x86_avx512_mask_pmovus_dw_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_dw_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512
@@ -7243,8 +7243,8 @@ declare <16 x float> @llvm.x86.avx512.sitofp.round.v16f32.v16i32(<16 x i32>, i32
define <16 x float>@test_int_x86_avx512_mask_cvt_dq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_dq2ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[CVT:%.*]] = sitofp <16 x i32> [[X0:%.*]] to <16 x float>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
@@ -7283,8 +7283,8 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double>, <8 x i32>, i8
define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_pd2dq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -7325,8 +7325,8 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double>, <8 x float>
define <8 x float>@test_int_x86_avx512_mask_cvt_pd2ps_512(<8 x double> %x0, <8 x float> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_pd2ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -7367,8 +7367,8 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double>, <8 x i32>, i
define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_pd2udq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -7408,9 +7408,9 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_ps2dq_512(
-; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[X2:%.*]] to <16 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
@@ -7443,8 +7443,8 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float>, <8 x double
define <8 x double>@test_int_x86_avx512_mask_cvt_ps2pd_512(<8 x float> %x0, <8 x double> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_ps2pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -7485,8 +7485,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_ps2udq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -7527,8 +7527,8 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double>, <8 x i32>, i
define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvtt_pd2dq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -7569,8 +7569,8 @@ declare <16 x float> @llvm.x86.avx512.uitofp.round.v16f32.v16i32(<16 x i32>, i32
define <16 x float>@test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_udq2ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[CVT:%.*]] = uitofp <16 x i32> [[X0:%.*]] to <16 x float>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
@@ -7609,8 +7609,8 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvtt_pd2udq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -7651,8 +7651,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvtt_ps2dq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -7693,8 +7693,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float>, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvtt_ps2udq_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -7735,7 +7735,7 @@ declare <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>, <4 x float>, <4
define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_getexp_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -7758,9 +7758,9 @@ define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1) #0 {
define <4 x float> @test_mask_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_getexp_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -7807,8 +7807,8 @@ define <4 x float> @test_mask_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_maskz_getexp_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_getexp_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -7835,7 +7835,7 @@ declare <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>, <2 x double>,
define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_getexp_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -7858,9 +7858,9 @@ define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1) #0 {
define <2 x double> @test_mask_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_mask_getexp_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -7907,8 +7907,8 @@ define <2 x double> @test_mask_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_maskz_getexp_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_maskz_getexp_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -7935,8 +7935,8 @@ declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32
define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cmp_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -7961,8 +7961,8 @@ define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8
define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cmp_sd_all(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -8053,8 +8053,8 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cmp_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -8080,8 +8080,8 @@ define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %
define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cmp_ss_all(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -8166,8 +8166,8 @@ declare <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double>, i32, <8
define <8 x double>@test_int_x86_avx512_mask_getmant_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -8208,8 +8208,8 @@ declare <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float>, i32, <16
define <16 x float>@test_int_x86_avx512_mask_getmant_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -8250,9 +8250,9 @@ declare <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -8334,9 +8334,9 @@ declare <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -8413,9 +8413,9 @@ define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x flo
define <4 x float> @test_int_x86_avx512_mask_getmant_ss_load(<4 x float> %x0, ptr %x1p, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_ss_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
@@ -8455,7 +8455,7 @@ declare <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double>, <8 x i64>)
define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -8479,9 +8479,9 @@ define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x
define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %mask) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_pd_512_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -8517,8 +8517,8 @@ define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0,
define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_maskz(<8 x double> %x0, <8 x i64> %x1, i8 %mask) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_pd_512_maskz(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3>
; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double>
@@ -8555,7 +8555,7 @@ declare <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float>, <16 x i32>
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4>
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -8579,9 +8579,9 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4>
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -8617,8 +8617,8 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0,
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512_maskz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4>
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
@@ -8668,8 +8668,8 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool(<16 x fl
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
; CHECK-NEXT: [[RES:%.*]] = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> [[X0]], <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
@@ -8697,7 +8697,7 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float>
; CHECK-NEXT: [[RES:%.*]] = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> [[X0]], <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
@@ -8726,9 +8726,9 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double>, <4 x flo
define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_ss2sd_round(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -8775,9 +8775,9 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float>, <2 x doubl
define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_sd2ss_round(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -8824,8 +8824,8 @@ declare <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x
define <16 x i32>@test_int_x86_avx512_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pternlog_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -8851,9 +8851,9 @@ define <16 x i32>@test_int_x86_avx512_pternlog_d_512(<16 x i32> %x0, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pternlog_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -8889,9 +8889,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x
define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pternlog_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -8929,8 +8929,8 @@ declare <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64>, <8 x i64>, <8 x i64
define <8 x i64>@test_int_x86_avx512_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pternlog_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -8956,9 +8956,9 @@ define <8 x i64>@test_int_x86_avx512_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1
define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pternlog_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -8994,9 +8994,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64
define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pternlog_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -9032,7 +9032,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i6
define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_comi_sd_eq_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9055,7 +9055,7 @@ define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) #
define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_ucomi_sd_eq_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9078,7 +9078,7 @@ define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1)
define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_comi_sd_eq(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9101,7 +9101,7 @@ define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) #0 {
define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_ucomi_sd_eq(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9124,7 +9124,7 @@ define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) #0 {
define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_comi_sd_lt_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9147,7 +9147,7 @@ define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) #
define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_ucomi_sd_lt_sae(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9170,7 +9170,7 @@ define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1)
define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_comi_sd_lt(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9193,7 +9193,7 @@ define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) #0 {
define i32 @test_x86_avx512_ucomi_sd_lt(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_ucomi_sd_lt(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9218,7 +9218,7 @@ declare i32 @llvm.x86.avx512.vcomi.sd(<2 x double>, <2 x double>, i32, i32)
define i32 @test_x86_avx512_ucomi_ss_lt(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_ucomi_ss_lt(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -9245,7 +9245,7 @@ declare <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double>, <8 x i64>)
define <8 x double>@test_int_x86_avx512_permvar_df_512(<8 x double> %x0, <8 x i64> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_df_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -9268,9 +9268,9 @@ define <8 x double>@test_int_x86_avx512_permvar_df_512(<8 x double> %x0, <8 x i6
define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_df_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -9305,8 +9305,8 @@ define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8
define <8 x double>@test_int_x86_avx512_maskz_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_df_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -9342,7 +9342,7 @@ declare <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64>, <8 x i64>)
define <8 x i64>@test_int_x86_avx512_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -9356,9 +9356,9 @@ define <8 x i64>@test_int_x86_avx512_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1
define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -9382,8 +9382,8 @@ define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64
define <8 x i64>@test_int_x86_avx512_maskz_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_di_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]])
@@ -9409,7 +9409,7 @@ declare <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float>, <16 x i32>)
define <16 x float>@test_int_x86_avx512_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_sf_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -9432,9 +9432,9 @@ define <16 x float>@test_int_x86_avx512_permvar_sf_512(<16 x float> %x0, <16 x i
define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_sf_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -9469,8 +9469,8 @@ define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <1
define <16 x float>@test_int_x86_avx512_maskz_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_sf_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -9506,7 +9506,7 @@ declare <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32>, <16 x i32>)
define <16 x i32>@test_int_x86_avx512_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_si_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -9520,9 +9520,9 @@ define <16 x i32>@test_int_x86_avx512_permvar_si_512(<16 x i32> %x0, <16 x i32>
define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_si_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -9546,8 +9546,8 @@ define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x
define <16 x i32>@test_int_x86_avx512_maskz_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_si_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]])
@@ -9573,9 +9573,9 @@ declare <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double>, <8 x do
define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -9635,9 +9635,9 @@ define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <
define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512_load(<8 x double> %x0, <8 x double> %x1, ptr %x2ptr) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_pd_512_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -9677,9 +9677,9 @@ declare <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double>, <8 x d
define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_fixupimm_pd_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -9742,9 +9742,9 @@ declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -9807,9 +9807,9 @@ declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_fixupimm_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -9872,9 +9872,9 @@ declare <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float>, <16 x f
define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -9934,9 +9934,9 @@ define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <
define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512_load(<16 x float> %x0, <16 x float> %x1, ptr %x2ptr) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_ps_512_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -9976,9 +9976,9 @@ declare <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float>, <16 x
define <16 x float>@test_int_x86_avx512_maskz_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_fixupimm_ps_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -10041,9 +10041,9 @@ declare <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -10106,9 +10106,9 @@ declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x doubl
define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_fixupimm_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -10174,9 +10174,9 @@ declare double @llvm.x86.avx512.vfmadd.f64(double, double, double, i32) #0
define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0
@@ -10287,9 +10287,9 @@ define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x d
define <4 x float> @test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0
@@ -10512,9 +10512,9 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x flo
define <4 x float> @test_int_x86_avx512_maskz_vfmadd_ss_load0(i8 zeroext %0, ptr nocapture readonly %1, float %2, float %3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_ss_load0(
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
@@ -10562,9 +10562,9 @@ define <4 x float> @test_int_x86_avx512_maskz_vfmadd_ss_load0(i8 zeroext %0, ptr
define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0
@@ -10675,9 +10675,9 @@ define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x
define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0
@@ -10788,10 +10788,10 @@ define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x fl
define void @fmadd_ss_mask_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_param, <4 x float> %extra_param2) #0 {
; CHECK-LABEL: @fmadd_ss_mask_memfold(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
@@ -10896,10 +10896,10 @@ define void @fmadd_ss_mask_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_par
define void @fmadd_ss_maskz_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_param, <4 x float> %extra_param2) #0 {
; CHECK-LABEL: @fmadd_ss_maskz_memfold(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
@@ -11003,10 +11003,10 @@ define void @fmadd_ss_maskz_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_pa
define void @fmadd_sd_mask_memfold(ptr %a, ptr %b, i8 %c, <2 x double> %extra_param, <2 x double> %extra_param2) #0 {
; CHECK-LABEL: @fmadd_sd_mask_memfold(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
@@ -11099,10 +11099,10 @@ define void @fmadd_sd_mask_memfold(ptr %a, ptr %b, i8 %c, <2 x double> %extra_pa
define void @fmadd_sd_maskz_memfold(ptr %a, ptr %b, i8 %c, <2x double> %extra_param, <2x double> %extra_param2) #0 {
; CHECK-LABEL: @fmadd_sd_maskz_memfold(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
@@ -11193,10 +11193,10 @@ define void @fmadd_sd_maskz_memfold(ptr %a, ptr %b, i8 %c, <2x double> %extra_pa
define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmsub_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = fneg <2 x double> [[X2:%.*]]
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP2]], i64 0
@@ -11321,10 +11321,10 @@ define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x
define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmsub_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = fneg <4 x float> [[X2:%.*]]
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP2]], i64 0
@@ -11450,9 +11450,9 @@ define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x fl
define <2 x double> @test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfnmsub_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = fneg <2 x double> [[X0:%.*]]
; CHECK-NEXT: [[TMP6:%.*]] = fneg <2 x double> [[X2:%.*]]
@@ -11584,9 +11584,9 @@ define <2 x double> @test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x
define <4 x float> @test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfnmsub_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = fneg <4 x float> [[X0:%.*]]
; CHECK-NEXT: [[TMP6:%.*]] = fneg <4 x float> [[X2:%.*]]
@@ -11717,11 +11717,11 @@ define <4 x float> @test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_ss_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP25:%.*]], !prof [[PROF1]]
@@ -11777,11 +11777,11 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_ss_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP25:%.*]], !prof [[PROF1]]
@@ -11838,10 +11838,10 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_ss_rm(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP20:%.*]], !prof [[PROF1]]
@@ -11891,7 +11891,7 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x
define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psll_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -11910,9 +11910,9 @@ define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) #0
define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psll_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -11941,8 +11941,8 @@ define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1
define <16 x i32> @test_x86_avx512_maskz_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psll_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -11974,7 +11974,7 @@ declare <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32>, <4 x i32>) nounwind r
define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psll_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -11993,9 +11993,9 @@ define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 {
define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psll_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -12024,8 +12024,8 @@ define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psll_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -12070,8 +12070,8 @@ define <16 x i32> @test_x86_avx512_pslli_d_512(<16 x i32> %a0) #0 {
define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_pslli_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -12095,7 +12095,7 @@ define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_pslli_d_512(<16 x i32> %a0, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_pslli_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer
@@ -12135,8 +12135,8 @@ define <8 x i64> @test_x86_avx512_pslli_q_512(<8 x i64> %a0) #0 {
define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_pslli_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -12160,7 +12160,7 @@ define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %pas
define <8 x i64> @test_x86_avx512_maskz_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_pslli_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer
@@ -12187,7 +12187,7 @@ declare <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64>, i32) nounwind readnone
define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psra_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -12206,9 +12206,9 @@ define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 {
define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psra_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -12237,8 +12237,8 @@ define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psra_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -12270,7 +12270,7 @@ declare <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64>, <2 x i64>) nounwind rea
define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psra_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -12289,9 +12289,9 @@ define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) #0
define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psra_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -12320,8 +12320,8 @@ define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1
define <16 x i32> @test_x86_avx512_maskz_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psra_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -12367,8 +12367,8 @@ define <8 x i64> @test_x86_avx512_psrai_q_512(<8 x i64> %a0) #0 {
define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrai_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -12392,7 +12392,7 @@ define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %pas
define <8 x i64> @test_x86_avx512_maskz_psrai_q_512(<8 x i64> %a0, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrai_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer
@@ -12432,8 +12432,8 @@ define <16 x i32> @test_x86_avx512_psrai_d_512(<16 x i32> %a0) #0 {
define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrai_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -12457,7 +12457,7 @@ define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_psrai_d_512(<16 x i32> %a0, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrai_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer
@@ -12485,7 +12485,7 @@ declare <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32>, i32) nounwind readno
define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrl_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -12504,9 +12504,9 @@ define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) #0
define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrl_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -12535,8 +12535,8 @@ define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1
define <16 x i32> @test_x86_avx512_maskz_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrl_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -12568,7 +12568,7 @@ declare <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32>, <4 x i32>) nounwind r
define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrl_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -12587,9 +12587,9 @@ define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 {
define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrl_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -12618,8 +12618,8 @@ define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrl_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -12664,8 +12664,8 @@ define <16 x i32> @test_x86_avx512_psrli_d_512(<16 x i32> %a0) #0 {
define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrli_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer
@@ -12689,7 +12689,7 @@ define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_psrli_d_512(<16 x i32> %a0, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrli_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer
@@ -12729,8 +12729,8 @@ define <8 x i64> @test_x86_avx512_psrli_q_512(<8 x i64> %a0) #0 {
define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrli_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer
@@ -12754,7 +12754,7 @@ define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %pas
define <8 x i64> @test_x86_avx512_maskz_psrli_q_512(<8 x i64> %a0, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrli_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer
@@ -12780,7 +12780,7 @@ declare <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64>, i32) nounwind readnone
define <16 x i32> @test_x86_avx512_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psllv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -12817,9 +12817,9 @@ define <16 x i32> @test_x86_avx512_psllv_d_512_const() #0 {
define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psllv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -12846,8 +12846,8 @@ define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psllv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -12876,7 +12876,7 @@ declare <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32>, <16 x i32>) nounwind
define <8 x i64> @test_x86_avx512_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psllv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -12913,9 +12913,9 @@ define <8 x i64> @test_x86_avx512_psllv_q_512_const() #0 {
define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psllv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -12942,8 +12942,8 @@ define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psllv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -12972,7 +12972,7 @@ declare <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64>, <8 x i64>) nounwind re
define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrav_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -12989,9 +12989,9 @@ define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) #
define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrav_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -13018,8 +13018,8 @@ define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrav_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -13048,7 +13048,7 @@ declare <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32>, <16 x i32>) nounwind
define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrav_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -13065,9 +13065,9 @@ define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) #0 {
define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrav_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -13094,8 +13094,8 @@ define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrav_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -13124,7 +13124,7 @@ declare <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64>, <8 x i64>) nounwind re
define <16 x i32> @test_x86_avx512_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrlv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -13161,9 +13161,9 @@ define <16 x i32> @test_x86_avx512_psrlv_d_512_const() #0 {
define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrlv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -13190,8 +13190,8 @@ define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @test_x86_avx512_maskz_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrlv_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -13220,7 +13220,7 @@ declare <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32>, <16 x i32>) nounwind
define <8 x i64> @test_x86_avx512_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrlv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64>
@@ -13257,9 +13257,9 @@ define <8 x i64> @test_x86_avx512_psrlv_q_512_const() #0 {
define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrlv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64>
@@ -13286,8 +13286,8 @@ define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1,
define <8 x i64> @test_x86_avx512_maskz_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrlv_q_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64>
@@ -13414,13 +13414,13 @@ define <16 x float> @bad_mask_transition(<8 x double> %a, <8 x double> %b, <8 x
; CHECK-LABEL: @bad_mask_transition(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i64> [[TMP0]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP6]], 0
@@ -13487,9 +13487,9 @@ define <16 x float> @bad_mask_transition_2(<8 x double> %a, <8 x double> %b, <8
; CHECK-LABEL: @bad_mask_transition_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP0]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll
index 7bd35182..dbef575 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll
@@ -28,7 +28,7 @@ declare i32 @llvm.x86.avx512.kunpck.wd(i32, i32)
define i32 @test_int_x86_avx512_kunpck_wd(i32 %x0, i32 %x1) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_kunpck_wd(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[X0:%.*]] to <32 x i1>
@@ -54,7 +54,7 @@ declare i64 @llvm.x86.avx512.kunpck.dq(i64, i64)
define i64 @test_int_x86_avx512_kunpck_qd(i64 %x0, i64 %x1) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_kunpck_qd(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64 [[TMP1]] to <64 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i64 [[X0:%.*]] to <64 x i1>
@@ -80,8 +80,8 @@ declare <64 x i8> @llvm.x86.avx512.mask.pbroadcast.b.gpr.512(i8, <64 x i8>, i64)
define { <64 x i8>, <64 x i8>, <64 x i8> } @test_int_x86_avx512_mask_pbroadcast_b_gpr_512(i8 %x0, <64 x i8> %x1, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcast_b_gpr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <64 x i8> splat (i8 -1), i8 [[TMP1]], i64 0
; CHECK-NEXT: [[DOTSPLATINSERT3:%.*]] = insertelement <64 x i8> poison, i8 [[X0:%.*]], i64 0
@@ -134,8 +134,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.pbroadcast.w.gpr.512(i16, <32 x i16>, i
define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_pbroadcast_w_gpr_512(i16 %x0, <32 x i16> %x1, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcast_w_gpr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <32 x i16> splat (i16 -1), i16 [[TMP1]], i64 0
; CHECK-NEXT: [[DOTSPLATINSERT3:%.*]] = insertelement <32 x i16> poison, i16 [[X0:%.*]], i64 0
@@ -187,10 +187,10 @@ declare void @llvm.x86.avx512.mask.storeu.b.512(ptr, <64 x i8>, i64)
define void @test_int_x86_avx512_mask_storeu_b_512(ptr %ptr1, ptr %ptr2, <64 x i8> %x1, i64 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_storeu_b_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[X2:%.*]] to <64 x i1>
@@ -230,10 +230,10 @@ declare void @llvm.x86.avx512.mask.storeu.w.512(ptr, <32 x i16>, i32)
define void @test_int_x86_avx512_mask_storeu_w_512(ptr %ptr1, ptr %ptr2, <32 x i16> %x1, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_storeu_w_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1>
@@ -274,8 +274,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.loadu.w.512(ptr, <32 x i16>, i32)
define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_loadu_w_512(ptr %ptr, ptr %ptr2, <32 x i16> %x1, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_loadu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -343,8 +343,8 @@ declare <64 x i8> @llvm.x86.avx512.mask.loadu.b.512(ptr, <64 x i8>, i64)
define { <64 x i8>, <64 x i8>, <64 x i8> } @test_int_x86_avx512_mask_loadu_b_512(ptr %ptr, ptr %ptr2, <64 x i8> %x1, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_loadu_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -533,7 +533,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8>, <64 x i8>, i32, <
define <64 x i8> @test_int_x86_avx512_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_palignr_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113>
@@ -547,10 +547,10 @@ define <64 x i8> @test_int_x86_avx512_palignr_512(<64 x i8> %x0, <64 x i8> %x1,
define <64 x i8> @test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3, i64 %x4) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_palignr_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113>
; CHECK-NEXT: [[PALIGNR:%.*]] = shufflevector <64 x i8> [[X1:%.*]], <64 x i8> [[X0:%.*]], <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113>
@@ -571,9 +571,9 @@ define <64 x i8> @test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8>
define <64 x i8> @test_int_x86_avx512_maskz_palignr_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x4) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_palignr_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113>
; CHECK-NEXT: [[PALIGNR:%.*]] = shufflevector <64 x i8> [[X1:%.*]], <64 x i8> [[X0:%.*]], <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113>
@@ -610,8 +610,8 @@ define <32 x i16> @test_int_x86_avx512_pshufh_w_512(<32 x i16> %x0, i32 %x1, <32
define <32 x i16> @test_int_x86_avx512_mask_pshufh_w_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pshufh_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 4, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 12, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 23, i32 20, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 31, i32 28, i32 28, i32 28>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X0]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 4, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 12, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 23, i32 20, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 31, i32 28, i32 28, i32 28>
@@ -633,7 +633,7 @@ define <32 x i16> @test_int_x86_avx512_mask_pshufh_w_512(<32 x i16> %x0, i32 %x1
define <32 x i16> @test_int_x86_avx512_maskz_pshufh_w_512(<32 x i16> %x0, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pshufh_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 4, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 12, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 23, i32 20, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 31, i32 28, i32 28, i32 28>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X0]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 4, i32 4, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 12, i32 12, i32 12, i32 16, i32 17, i32 18, i32 19, i32 23, i32 20, i32 20, i32 20, i32 24, i32 25, i32 26, i32 27, i32 31, i32 28, i32 28, i32 28>
@@ -670,8 +670,8 @@ define <32 x i16> @test_int_x86_avx512_pshufl_w_512(<32 x i16> %x0, i32 %x1, <32
define <32 x i16> @test_int_x86_avx512_mask_pshufl_w_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pshufl_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i32> <i32 3, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 8, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 19, i32 16, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 27, i32 24, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X0]], <32 x i32> <i32 3, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 8, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 19, i32 16, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 27, i32 24, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
@@ -693,7 +693,7 @@ define <32 x i16> @test_int_x86_avx512_mask_pshufl_w_512(<32 x i16> %x0, i32 %x1
define <32 x i16> @test_int_x86_avx512_maskz_pshufl_w_512(<32 x i16> %x0, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pshufl_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i32> <i32 3, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 8, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 19, i32 16, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 27, i32 24, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X0]], <32 x i32> <i32 3, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 8, i32 8, i32 8, i32 12, i32 13, i32 14, i32 15, i32 19, i32 16, i32 16, i32 16, i32 20, i32 21, i32 22, i32 23, i32 27, i32 24, i32 24, i32 24, i32 28, i32 29, i32 30, i32 31>
@@ -715,7 +715,7 @@ define <32 x i16> @test_int_x86_avx512_maskz_pshufl_w_512(<32 x i16> %x0, i32 %x
define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) nounwind #0 {
; CHECK-LABEL: @test_pcmpeq_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
@@ -737,8 +737,8 @@ define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) nounwind #0 {
define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_pcmpeq_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
@@ -770,7 +770,7 @@ declare i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8>, <64 x i8>, i64)
define i32 @test_pcmpeq_w(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_pcmpeq_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
@@ -792,8 +792,8 @@ define i32 @test_pcmpeq_w(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
define i32 @test_mask_pcmpeq_w(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_pcmpeq_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
@@ -825,7 +825,7 @@ declare i32 @llvm.x86.avx512.mask.pcmpeq.w.512(<32 x i16>, <32 x i16>, i32)
define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) nounwind #0 {
; CHECK-LABEL: @test_pcmpgt_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A:%.*]], splat (i8 -128)
; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[TMP1]], splat (i8 -1)
@@ -851,8 +851,8 @@ define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) nounwind #0 {
define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_pcmpgt_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[A:%.*]], splat (i8 -128)
; CHECK-NEXT: [[TMP5:%.*]] = xor <64 x i8> [[TMP1]], splat (i8 -1)
@@ -888,7 +888,7 @@ declare i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8>, <64 x i8>, i64)
define i32 @test_pcmpgt_w(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_pcmpgt_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A:%.*]], splat (i16 -32768)
; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[TMP1]], splat (i16 -1)
@@ -914,8 +914,8 @@ define i32 @test_pcmpgt_w(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
define i32 @test_mask_pcmpgt_w(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_pcmpgt_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[A:%.*]], splat (i16 -32768)
; CHECK-NEXT: [[TMP5:%.*]] = xor <32 x i16> [[TMP1]], splat (i16 -1)
@@ -953,7 +953,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.punpckhb.w.512(<64 x i8>, <64 x i8>, <64
define <64 x i8> @test_int_x86_avx512_punpckhb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_punpckhb_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
@@ -967,9 +967,9 @@ define <64 x i8> @test_int_x86_avx512_punpckhb_w_512(<64 x i8> %x0, <64 x i8> %x
define <64 x i8> @test_int_x86_avx512_mask_punpckhb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_punpckhb_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
@@ -993,7 +993,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.punpcklb.w.512(<64 x i8>, <64 x i8>, <64
define <64 x i8> @test_int_x86_avx512_punpcklb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_punpcklb_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
@@ -1007,9 +1007,9 @@ define <64 x i8> @test_int_x86_avx512_punpcklb_w_512(<64 x i8> %x0, <64 x i8> %x
define <64 x i8> @test_int_x86_avx512_mask_punpcklb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_punpcklb_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
@@ -1033,7 +1033,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.punpckhw.d.512(<32 x i16>, <32 x i16>,
define <32 x i16> @test_int_x86_avx512_punpckhw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_punpckhw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
@@ -1047,9 +1047,9 @@ define <32 x i16> @test_int_x86_avx512_punpckhw_d_512(<32 x i16> %x0, <32 x i16>
define <32 x i16> @test_int_x86_avx512_mask_punpckhw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_punpckhw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
@@ -1073,7 +1073,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.punpcklw.d.512(<32 x i16>, <32 x i16>,
define <32 x i16> @test_int_x86_avx512_punpcklw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_punpcklw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59>
@@ -1087,9 +1087,9 @@ define <32 x i16> @test_int_x86_avx512_punpcklw_d_512(<32 x i16> %x0, <32 x i16>
define <32 x i16> @test_int_x86_avx512_mask_punpcklw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_punpcklw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59>
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59>
@@ -1113,7 +1113,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmaxs.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8> @test_int_x86_avx512_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaxs_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.smax.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1127,9 +1127,9 @@ define <64 x i8> @test_int_x86_avx512_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %x1,
define <64 x i8> @test_int_x86_avx512_mask_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.smax.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1153,7 +1153,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaxs.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_int_x86_avx512_pmaxs_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaxs_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.smax.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1167,9 +1167,9 @@ define <32 x i16> @test_int_x86_avx512_pmaxs_w_512(<32 x i16> %x0, <32 x i16> %x
define <32 x i16> @test_int_x86_avx512_mask_pmaxs_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.smax.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1193,7 +1193,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmaxu.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8> @test_int_x86_avx512_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaxu_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.umax.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1207,9 +1207,9 @@ define <64 x i8> @test_int_x86_avx512_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %x1,
define <64 x i8> @test_int_x86_avx512_mask_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.umax.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1233,7 +1233,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaxu.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_int_x86_avx512_pmaxu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaxu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.umax.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1247,9 +1247,9 @@ define <32 x i16> @test_int_x86_avx512_pmaxu_w_512(<32 x i16> %x0, <32 x i16> %x
define <32 x i16> @test_int_x86_avx512_mask_pmaxu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.umax.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1273,7 +1273,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmins.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8> @test_int_x86_avx512_pmins_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmins_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.smin.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1287,9 +1287,9 @@ define <64 x i8> @test_int_x86_avx512_pmins_b_512(<64 x i8> %x0, <64 x i8> %x1,
define <64 x i8> @test_int_x86_avx512_mask_pmins_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.smin.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1313,7 +1313,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmins.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_int_x86_avx512_pmins_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmins_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.smin.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1327,9 +1327,9 @@ define <32 x i16> @test_int_x86_avx512_pmins_w_512(<32 x i16> %x0, <32 x i16> %x
define <32 x i16> @test_int_x86_avx512_mask_pmins_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.smin.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1353,7 +1353,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pminu.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8> @test_int_x86_avx512_pminu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pminu_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.umin.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1367,9 +1367,9 @@ define <64 x i8> @test_int_x86_avx512_pminu_b_512(<64 x i8> %x0, <64 x i8> %x1,
define <64 x i8> @test_int_x86_avx512_mask_pminu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.umin.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1393,7 +1393,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pminu.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_int_x86_avx512_pminu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pminu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.umin.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1407,9 +1407,9 @@ define <32 x i16> @test_int_x86_avx512_pminu_w_512(<32 x i16> %x0, <32 x i16> %x
define <32 x i16> @test_int_x86_avx512_mask_pminu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.umin.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1448,8 +1448,8 @@ define <32 x i16> @test_int_x86_avx512_pmovzxb_w_512(<32 x i8> %x0, <32 x i16> %
define <32 x i16> @test_int_x86_avx512_mask_pmovzxb_w_512(<32 x i8> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxb_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> splat (i8 -1), <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i8> [[X0:%.*]], <32 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -1473,7 +1473,7 @@ define <32 x i16> @test_int_x86_avx512_mask_pmovzxb_w_512(<32 x i8> %x0, <32 x i
define <32 x i16> @test_int_x86_avx512_maskz_pmovzxb_w_512(<32 x i8> %x0, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxb_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> splat (i8 -1), <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i8> [[X0:%.*]], <32 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -1514,8 +1514,8 @@ define <32 x i16> @test_int_x86_avx512_pmovsxb_w_512(<32 x i8> %x0, <32 x i16> %
define <32 x i16> @test_int_x86_avx512_mask_pmovsxb_w_512(<32 x i8> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxb_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> splat (i8 -1), <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i8> [[X0:%.*]], <32 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -1539,7 +1539,7 @@ define <32 x i16> @test_int_x86_avx512_mask_pmovsxb_w_512(<32 x i8> %x0, <32 x i
define <32 x i16> @test_int_x86_avx512_maskz_pmovsxb_w_512(<32 x i8> %x0, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxb_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> splat (i8 -1), <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i8> [[X0:%.*]], <32 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -1565,7 +1565,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16>, <8 x i16>, <32 x
define <32 x i16> @test_int_x86_avx512_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_psrl_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1585,9 +1585,9 @@ define <32 x i16> @test_int_x86_avx512_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1,
define <32 x i16> @test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psrl_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -1615,8 +1615,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16>
define <32 x i16> @test_int_x86_avx512_maskz_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psrl_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -1646,8 +1646,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16>, i32, <32 x i16>
define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psrl_wi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer
@@ -1697,7 +1697,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16>, <8 x i16>, <32 x
define <32 x i16> @test_int_x86_avx512_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_psra_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1717,9 +1717,9 @@ define <32 x i16> @test_int_x86_avx512_psra_w_512(<32 x i16> %x0, <8 x i16> %x1,
define <32 x i16> @test_int_x86_avx512_mask_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psra_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -1747,8 +1747,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psra_w_512(<32 x i16> %x0, <8 x i16>
define <32 x i16> @test_int_x86_avx512_maskz_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psra_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -1778,8 +1778,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16>, i32, <32 x i16>
define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_psra_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psra_wi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16> [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer
@@ -1829,7 +1829,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16>, <8 x i16>, <32 x
define <32 x i16> @test_int_x86_avx512_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_psll_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1849,9 +1849,9 @@ define <32 x i16> @test_int_x86_avx512_psll_w_512(<32 x i16> %x0, <8 x i16> %x1,
define <32 x i16> @test_int_x86_avx512_mask_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psll_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -1879,8 +1879,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psll_w_512(<32 x i16> %x0, <8 x i16>
define <32 x i16> @test_int_x86_avx512_maskz_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psll_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -1910,8 +1910,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16>, i32, <32 x i16>
define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_psll_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psll_wi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer
@@ -1961,7 +1961,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8> @test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pshuf_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP4]]
@@ -1976,9 +1976,9 @@ define <64 x i8> @test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1,
define <64 x i8> @test_int_x86_avx512_mask_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pshuf_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP13]]
@@ -2035,7 +2035,7 @@ define <32 x i16> @test_int_x86_avx512_cvtmask2w_512(i32 %x0) nounwind #0 {
define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -2053,9 +2053,9 @@ define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) no
define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -2081,8 +2081,8 @@ define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <
define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -2107,7 +2107,7 @@ define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2137,10 +2137,10 @@ define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) nounw
define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -2177,9 +2177,9 @@ define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32
define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -2216,7 +2216,7 @@ define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32
define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmb_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2252,10 +2252,10 @@ define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) noun
define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmbk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -2298,9 +2298,9 @@ define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32
define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmbkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -2346,7 +2346,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32>, <16 x i32>, <3
define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP3]] to <32 x i16>
@@ -2364,9 +2364,9 @@ define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nou
define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16>
@@ -2392,8 +2392,8 @@ define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <6
define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP4]] to <32 x i16>
@@ -2418,7 +2418,7 @@ define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2448,10 +2448,10 @@ define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi
define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -2488,9 +2488,9 @@ define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x
define <64 x i8> @test_mask_packs_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -2531,7 +2531,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16>, <32 x i16>, <64
define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -2549,9 +2549,9 @@ define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) n
define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -2577,8 +2577,8 @@ define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -2603,7 +2603,7 @@ define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2633,10 +2633,10 @@ define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) noun
define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -2673,9 +2673,9 @@ define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32
define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -2712,7 +2712,7 @@ define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i3
define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmb_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2748,10 +2748,10 @@ define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) nou
define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmbk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -2794,9 +2794,9 @@ define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <3
define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmbkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -2842,7 +2842,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32>, <16 x i32>, <3
define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = sext <32 x i1> [[TMP3]] to <32 x i16>
@@ -2860,9 +2860,9 @@ define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) no
define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16>
@@ -2888,8 +2888,8 @@ define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <
define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = sext <32 x i1> [[TMP4]] to <32 x i16>
@@ -2914,7 +2914,7 @@ define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b,
define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2944,10 +2944,10 @@ define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounw
define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -2984,9 +2984,9 @@ define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64
define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -3026,7 +3026,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16>, <32 x i16>, <64
define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) nounwind #0 {
; CHECK-LABEL: @test_cmp_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
@@ -3142,8 +3142,8 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) nounwind #0 {
define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_cmp_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
@@ -3329,7 +3329,7 @@ declare i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8>, <64 x i8>, i32, i64) noun
define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) nounwind #0 {
; CHECK-LABEL: @test_ucmp_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
@@ -3437,8 +3437,8 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) nounwind #0 {
define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_x86_avx512_ucmp_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
@@ -3616,7 +3616,7 @@ declare i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8>, <64 x i8>, i32, i64) nou
define i32 @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) nounwind #0 {
; CHECK-LABEL: @test_cmp_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
@@ -3732,8 +3732,8 @@ define i32 @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) nounwind #0 {
define i32 @test_mask_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_cmp_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
@@ -3919,7 +3919,7 @@ declare i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16>, <32 x i16>, i32, i32) no
define i32 @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) nounwind #0 {
; CHECK-LABEL: @test_ucmp_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
@@ -4027,8 +4027,8 @@ define i32 @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) nounwind #0 {
define i32 @test_mask_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_ucmp_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
@@ -4209,7 +4209,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8>, <64 x i8>, <64 x i
define <64 x i8> @mm512_avg_epu8(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @mm512_avg_epu8(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -4223,9 +4223,9 @@ define <64 x i8> @mm512_avg_epu8(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i6
define <64 x i8> @mm512_mask_avg_epu8(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 {
; CHECK-LABEL: @mm512_mask_avg_epu8(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -4249,7 +4249,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @mm512_avg_epu16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @mm512_avg_epu16(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4263,9 +4263,9 @@ define <32 x i16> @mm512_avg_epu16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x
define <32 x i16> @mm512_mask_avg_epu16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @mm512_mask_avg_epu16(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4304,8 +4304,8 @@ define <32 x i16> @test_int_x86_avx512_pabs_w_512(<32 x i16> %x0, <32 x i16> %x1
define <32 x i16> @test_int_x86_avx512_mask_pabs_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pabs_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <32 x i16> [[X0:%.*]], splat (i16 -32768)
; CHECK-NEXT: [[TMP13:%.*]] = select <32 x i1> [[TMP12]], <32 x i16> splat (i16 -1), <32 x i16> [[TMP1]]
@@ -4346,8 +4346,8 @@ define <64 x i8> @test_int_x86_avx512_pabs_b_512(<64 x i8> %x0, <64 x i8> %x1) n
define <64 x i8> @test_int_x86_avx512_mask_pabs_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pabs_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <64 x i8> [[X0:%.*]], splat (i8 -128)
; CHECK-NEXT: [[TMP13:%.*]] = select <64 x i1> [[TMP12]], <64 x i8> splat (i8 -1), <64 x i8> [[TMP1]]
@@ -4373,8 +4373,8 @@ declare i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8>, <64 x i8>, i64)
define i64 @test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_ptestm_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = and <64 x i8> [[X0:%.*]], [[TMP2]]
@@ -4432,8 +4432,8 @@ declare i32 @llvm.x86.avx512.ptestm.w.512(<32 x i16>, <32 x i16>, i32)
define i32 @test_int_x86_avx512_ptestm_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_ptestm_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = and <32 x i16> [[X0:%.*]], [[TMP2]]
@@ -4491,8 +4491,8 @@ declare i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8>, <64 x i8>, i64 %x2)
define i64 @test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_ptestnm_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = and <64 x i8> [[X0:%.*]], [[TMP2]]
@@ -4550,8 +4550,8 @@ declare i32 @llvm.x86.avx512.ptestnm.w.512(<32 x i16>, <32 x i16>, i32 %x2)
define i32 @test_int_x86_avx512_ptestnm_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_ptestnm_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = and <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = and <32 x i16> [[X0:%.*]], [[TMP2]]
@@ -4655,7 +4655,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmulhu.w.512(<32 x i16>, <32 x i16>, <3
define <32 x i16> @test_int_x86_avx512_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmulhu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4669,9 +4669,9 @@ define <32 x i16> @test_int_x86_avx512_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %
define <32 x i16> @test_int_x86_avx512_mask_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmulhu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4695,7 +4695,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmulh.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_int_x86_avx512_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmulh_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4709,9 +4709,9 @@ define <32 x i16> @test_int_x86_avx512_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x
define <32 x i16> @test_int_x86_avx512_mask_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmulh_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4735,7 +4735,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmul.hr.sw.512(<32 x i16>, <32 x i16>,
define <32 x i16> @test_int_x86_avx512_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmulhr_sw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4749,9 +4749,9 @@ define <32 x i16> @test_int_x86_avx512_pmulhr_sw_512(<32 x i16> %x0, <32 x i16>
define <32 x i16> @test_int_x86_avx512_mask_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmulhr_sw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4775,7 +4775,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8>, <64 x i8>, <3
define <32 x i16> @test_int_x86_avx512_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaddubs_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <64 x i8> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
@@ -4801,9 +4801,9 @@ define <32 x i16> @test_int_x86_avx512_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %
define <32 x i16> @test_int_x86_avx512_mask_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaddubs_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <64 x i8> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
@@ -4839,7 +4839,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16>, <32 x i16>, <1
define <16 x i32> @test_int_x86_avx512_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1, <16 x i32> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaddw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP2]], zeroinitializer
@@ -4865,9 +4865,9 @@ define <16 x i32> @test_int_x86_avx512_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %
define <16 x i32> @test_int_x86_avx512_mask_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1, <16 x i32> %x2, i16 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaddw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <32 x i16> [[TMP2]], zeroinitializer
@@ -4903,7 +4903,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16>, <32 x i16>,
define <32 x i16> @test_int_x86_avx512_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4917,9 +4917,9 @@ define <32 x i16> @test_int_x86_avx512_permvar_hi_512(<32 x i16> %x0, <32 x i16>
define <32 x i16> @test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4941,8 +4941,8 @@ define <32 x i16> @test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
define <32 x i16> @test_int_x86_avx512_maskz_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -4965,8 +4965,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.512(<32 x i16>, <32 x i16
define <32 x i16> @test_int_x86_avx512_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermt2var_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5>
@@ -4988,10 +4988,10 @@ define <32 x i16> @test_int_x86_avx512_vpermt2var_hi_512(<32 x i16> %x0, <32 x i
define <32 x i16> @test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermt2var_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5>
; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X4:%.*]], <32 x i16> [[TMP2]])
@@ -5022,10 +5022,10 @@ declare <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16>, <32 x i1
define <32 x i16> @test_int_x86_avx512_maskz_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5>
; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X4:%.*]], <32 x i16> [[TMP2]])
@@ -5057,8 +5057,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16>, <32 x i16
define <32 x i16> @test_int_x86_avx512_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[X1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X1]] to <32 x i5>
; CHECK-NEXT: [[TMP100:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X3:%.*]], <32 x i16> [[TMP2]])
@@ -5080,9 +5080,9 @@ define <32 x i16> @test_int_x86_avx512_vpermi2var_hi_512(<32 x i16> %x0, <32 x i
define <32 x i16> @test_int_x86_avx512_mask_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[TMP3]] to <32 x i5>
; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X1:%.*]], <32 x i16> [[TMP2]])
@@ -5114,9 +5114,9 @@ declare <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8>, <64 x i8>, i32,
define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_dbpsadbw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <64 x i8> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -5188,7 +5188,7 @@ define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_dbpsadbw
define <32 x i16> @test_mask_adds_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5202,9 +5202,9 @@ define <32 x i16> @test_mask_adds_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) nou
define <32 x i16> @test_mask_adds_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5226,8 +5226,8 @@ define <32 x i16> @test_mask_adds_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3
define <32 x i16> @test_mask_adds_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5248,7 +5248,7 @@ define <32 x i16> @test_mask_adds_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <32 x i16> @test_mask_adds_epu16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -5274,10 +5274,10 @@ define <32 x i16> @test_mask_adds_epu16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi
define <32 x i16> @test_mask_adds_epu16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -5310,9 +5310,9 @@ define <32 x i16> @test_mask_adds_epu16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x
define <32 x i16> @test_mask_adds_epu16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -5348,7 +5348,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.paddus.w.512(<32 x i16>, <32 x i16>, <3
define <32 x i16> @test_mask_subs_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5362,9 +5362,9 @@ define <32 x i16> @test_mask_subs_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) nou
define <32 x i16> @test_mask_subs_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5386,8 +5386,8 @@ define <32 x i16> @test_mask_subs_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3
define <32 x i16> @test_mask_subs_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5408,7 +5408,7 @@ define <32 x i16> @test_mask_subs_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <32 x i16> @test_mask_subs_epu16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -5434,10 +5434,10 @@ define <32 x i16> @test_mask_subs_epu16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi
define <32 x i16> @test_mask_subs_epu16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -5470,9 +5470,9 @@ define <32 x i16> @test_mask_subs_epu16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x
define <32 x i16> @test_mask_subs_epu16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -5508,7 +5508,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psubus.w.512(<32 x i16>, <32 x i16>, <3
define <64 x i8> @test_mask_adds_epu8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu8_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -5522,9 +5522,9 @@ define <64 x i8> @test_mask_adds_epu8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwin
define <64 x i8> @test_mask_adds_epu8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu8_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -5546,8 +5546,8 @@ define <64 x i8> @test_mask_adds_epu8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x
define <64 x i8> @test_mask_adds_epu8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu8_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -5568,7 +5568,7 @@ define <64 x i8> @test_mask_adds_epu8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %
define <64 x i8> @test_mask_adds_epu8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu8_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -5594,10 +5594,10 @@ define <64 x i8> @test_mask_adds_epu8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind
define <64 x i8> @test_mask_adds_epu8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu8_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -5630,9 +5630,9 @@ define <64 x i8> @test_mask_adds_epu8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8
define <64 x i8> @test_mask_adds_epu8_rmkz_512(<64 x i8> %a, ptr %ptr_b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epu8_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -5668,7 +5668,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.paddus.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8> @test_mask_subs_epu8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu8_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -5682,9 +5682,9 @@ define <64 x i8> @test_mask_subs_epu8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwin
define <64 x i8> @test_mask_subs_epu8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu8_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -5706,8 +5706,8 @@ define <64 x i8> @test_mask_subs_epu8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x
define <64 x i8> @test_mask_subs_epu8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu8_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -5728,7 +5728,7 @@ define <64 x i8> @test_mask_subs_epu8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %
define <64 x i8> @test_mask_subs_epu8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu8_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -5754,10 +5754,10 @@ define <64 x i8> @test_mask_subs_epu8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind
define <64 x i8> @test_mask_subs_epu8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu8_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -5790,9 +5790,9 @@ define <64 x i8> @test_mask_subs_epu8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8
define <64 x i8> @test_mask_subs_epu8_rmkz_512(<64 x i8> %a, ptr %ptr_b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epu8_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -5828,7 +5828,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.psubus.b.512(<64 x i8>, <64 x i8>, <64 x
define <32 x i16> @test_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_adds_epi16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5842,9 +5842,9 @@ define <32 x i16> @test_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind
define <32 x i16> @test_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_adds_epi16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5868,8 +5868,8 @@ define <32 x i16> @test_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i
define <32 x i16> @test_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_adds_epi16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -5892,7 +5892,7 @@ define <32 x i16> @test_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %m
define <32 x i16> @test_adds_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_adds_epi16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -5918,10 +5918,10 @@ define <32 x i16> @test_adds_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0
define <32 x i16> @test_adds_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_adds_epi16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -5956,9 +5956,9 @@ define <32 x i16> @test_adds_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16>
define <32 x i16> @test_adds_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_adds_epi16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -5996,7 +5996,7 @@ declare <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6010,9 +6010,9 @@ define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nou
define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6034,8 +6034,8 @@ define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3
define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6056,7 +6056,7 @@ define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -6082,10 +6082,10 @@ define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi
define <32 x i16> @test_mask_adds_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -6118,9 +6118,9 @@ define <32 x i16> @test_mask_adds_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x
define <32 x i16> @test_mask_adds_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -6156,7 +6156,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_subs_epi16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6170,9 +6170,9 @@ define <32 x i16> @test_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind
define <32 x i16> @test_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_subs_epi16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6196,8 +6196,8 @@ define <32 x i16> @test_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i
define <32 x i16> @test_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_subs_epi16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6220,7 +6220,7 @@ define <32 x i16> @test_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %m
define <32 x i16> @test_subs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_subs_epi16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -6246,10 +6246,10 @@ define <32 x i16> @test_subs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0
define <32 x i16> @test_subs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_subs_epi16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -6284,9 +6284,9 @@ define <32 x i16> @test_subs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16>
define <32 x i16> @test_subs_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_subs_epi16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -6324,7 +6324,7 @@ declare <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6338,9 +6338,9 @@ define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nou
define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6362,8 +6362,8 @@ define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3
define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]])
@@ -6384,7 +6384,7 @@ define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -6410,10 +6410,10 @@ define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi
define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -6446,9 +6446,9 @@ define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x
define <32 x i16> @test_mask_subs_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -6484,7 +6484,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16>, <32 x i16>, <32
define <64 x i8> @test_mask_adds_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi8_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -6498,9 +6498,9 @@ define <64 x i8> @test_mask_adds_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwin
define <64 x i8> @test_mask_adds_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi8_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -6522,8 +6522,8 @@ define <64 x i8> @test_mask_adds_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x
define <64 x i8> @test_mask_adds_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi8_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -6544,7 +6544,7 @@ define <64 x i8> @test_mask_adds_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %
define <64 x i8> @test_mask_adds_epi8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi8_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -6570,10 +6570,10 @@ define <64 x i8> @test_mask_adds_epi8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind
define <64 x i8> @test_mask_adds_epi8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi8_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -6606,9 +6606,9 @@ define <64 x i8> @test_mask_adds_epi8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8
define <64 x i8> @test_mask_adds_epi8_rmkz_512(<64 x i8> %a, ptr %ptr_b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_adds_epi8_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -6644,7 +6644,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8>, <64 x i8>, <64 x
define <64 x i8> @test_mask_subs_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi8_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -6658,9 +6658,9 @@ define <64 x i8> @test_mask_subs_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwin
define <64 x i8> @test_mask_subs_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi8_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -6682,8 +6682,8 @@ define <64 x i8> @test_mask_subs_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x
define <64 x i8> @test_mask_subs_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi8_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]])
@@ -6704,7 +6704,7 @@ define <64 x i8> @test_mask_subs_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %
define <64 x i8> @test_mask_subs_epi8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi8_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -6730,10 +6730,10 @@ define <64 x i8> @test_mask_subs_epi8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind
define <64 x i8> @test_mask_subs_epi8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi8_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -6766,9 +6766,9 @@ define <64 x i8> @test_mask_subs_epi8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8
define <64 x i8> @test_mask_subs_epi8_rmkz_512(<64 x i8> %a, ptr %ptr_b, i64 %mask) nounwind #0 {
; CHECK-LABEL: @test_mask_subs_epi8_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -6806,7 +6806,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16>, <32 x i16>, <32 x
define <32 x i16> @test_int_x86_avx512_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_psrlv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -6820,9 +6820,9 @@ define <32 x i16> @test_int_x86_avx512_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1,
define <32 x i16> @test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psrlv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -6844,8 +6844,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16>
define <32 x i16> @test_int_x86_avx512_maskz_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psrlv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -6869,7 +6869,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16>, <32 x i16>, <32
define <32 x i16> @test_int_x86_avx512_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_psrav32_hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -6883,9 +6883,9 @@ define <32 x i16> @test_int_x86_avx512_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1
define <32 x i16> @test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psrav32_hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -6907,8 +6907,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16
define <32 x i16> @test_int_x86_avx512_maskz_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psrav32_hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -6932,7 +6932,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16>, <32 x i16>, <32 x
define <32 x i16> @test_int_x86_avx512_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_psllv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -6946,9 +6946,9 @@ define <32 x i16> @test_int_x86_avx512_psllv32hi(<32 x i16> %x0, <32 x i16> %x1,
define <32 x i16> @test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psllv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -6970,8 +6970,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16>
define <32 x i16> @test_int_x86_avx512_maskz_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psllv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -7008,8 +7008,8 @@ define <32 x i8> @test_int_x86_avx512_pmov_wb_512(<32 x i16> %x0, <32 x i8> %x1)
define <32 x i8> @test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_wb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8>
; CHECK-NEXT: [[TMP4:%.*]] = trunc <32 x i16> [[X0:%.*]] to <32 x i8>
@@ -7031,7 +7031,7 @@ define <32 x i8> @test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8>
define <32 x i8> @test_int_x86_avx512_maskz_pmov_wb_512(<32 x i16> %x0, i32 %x2) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmov_wb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8>
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X0:%.*]] to <32 x i8>
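The pattern throughout these hunks is a one-for-one rewrite of the shadow-address constant: the old ptrtoint/add/inttoptr round-trip and the new byte-wise getelementptr denote the same address, the base of @__msan_param_tls plus a fixed byte offset. A minimal standalone sketch of that equivalence, using a hypothetical global @shadow in place of the runtime TLS symbol; the old form is spelled as instructions here, since recent textual IR no longer accepts add constant expressions:

; Hypothetical module showing the address equivalence behind the updated
; CHECK lines: @shadow stands in for @__msan_param_tls.
@shadow = external global [200 x i8]

define i1 @same_address() {
  ; Old spelling, as instructions: integer round-trip through
  ; ptrtoint/add/inttoptr (the removed CHECK lines matched the
  ; constant-expression form of this computation).
  %base = ptrtoint ptr @shadow to i64
  %addr = add i64 %base, 64
  %old  = inttoptr i64 %addr to ptr
  ; New spelling: a byte-offset getelementptr constant expression,
  ; which is what the instrumentation now emits.
  %eq = icmp eq ptr %old, getelementptr (i8, ptr @shadow, i64 64)
  ret i1 %eq
}

The offsets themselves are unchanged by the rewrite: 64 is the second vector parameter's shadow slot, 72/136 and 128/192 are the passThru and mask slots for the memory and register variants respectively.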
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll
index 8bf6d5a..481751b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll
@@ -24,7 +24,7 @@ define i32 @test_int_x86_avx512_kadd_d(<32 x i16> %A, <32 x i16> %B) nounwind #0
; CHECK-LABEL: @test_int_x86_avx512_kadd_d(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = xor <32 x i16> [[A:%.*]], zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = or <32 x i16> [[TMP0]], zeroinitializer
@@ -74,7 +74,7 @@ define i32 @test_int_x86_avx512_kadd_q(<64 x i8> %A, <64 x i8> %B) nounwind #0 {
; CHECK-LABEL: @test_int_x86_avx512_kadd_q(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = xor <64 x i8> [[A:%.*]], zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = or <64 x i8> [[TMP0]], zeroinitializer
@@ -123,7 +123,7 @@ declare <64 x i1> @llvm.x86.avx512.kadd.q(<64 x i1>, <64 x i1>)
define i32 @test_x86_avx512_ktestc_d(<32 x i16> %A, <32 x i16> %B) #0 {
; CHECK-LABEL: @test_x86_avx512_ktestc_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A:%.*]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], zeroinitializer
@@ -165,7 +165,7 @@ declare i32 @llvm.x86.avx512.ktestc.d(<32 x i1>, <32 x i1>) nounwind readnone
define i32 @test_x86_avx512_ktestz_d(<32 x i16> %A, <32 x i16> %B) #0 {
; CHECK-LABEL: @test_x86_avx512_ktestz_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A:%.*]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], zeroinitializer
@@ -207,7 +207,7 @@ declare i32 @llvm.x86.avx512.ktestz.d(<32 x i1>, <32 x i1>) nounwind readnone
define i32 @test_x86_avx512_ktestc_q(<64 x i8> %A, <64 x i8> %B) #0 {
; CHECK-LABEL: @test_x86_avx512_ktestc_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A:%.*]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], zeroinitializer
@@ -249,7 +249,7 @@ declare i32 @llvm.x86.avx512.ktestc.q(<64 x i1>, <64 x i1>) nounwind readnone
define i32 @test_x86_avx512_ktestz_q(<64 x i8> %A, <64 x i8> %B) #0 {
; CHECK-LABEL: @test_x86_avx512_ktestz_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A:%.*]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], zeroinitializer
@@ -291,7 +291,7 @@ declare i32 @llvm.x86.avx512.ktestz.q(<64 x i1>, <64 x i1>) nounwind readnone
define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -309,9 +309,9 @@ define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #0
define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -339,8 +339,8 @@ define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <
define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -367,7 +367,7 @@ define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -397,10 +397,10 @@ define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 {
define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -439,9 +439,9 @@ define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32
define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -480,7 +480,7 @@ define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32
define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmb_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -516,10 +516,10 @@ define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) #0 {
define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmbk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -564,9 +564,9 @@ define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32
define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi32_rmbkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -614,7 +614,7 @@ declare <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32>, <16 x i32>)
define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP3]] to <32 x i16>
@@ -632,9 +632,9 @@ define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) #0
define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16>
@@ -662,8 +662,8 @@ define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <6
define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP4]] to <32 x i16>
@@ -690,7 +690,7 @@ define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -720,10 +720,10 @@ define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) #0 {
define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -762,9 +762,9 @@ define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x
define <64 x i8> @test_mask_packs_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i64 %mask) #0 {
; CHECK-LABEL: @test_mask_packs_epi16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -807,7 +807,7 @@ declare <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32>
@@ -825,9 +825,9 @@ define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #
define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32>
@@ -855,8 +855,8 @@ define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32>
@@ -883,7 +883,7 @@ define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -913,10 +913,10 @@ define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 {
define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -955,9 +955,9 @@ define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32
define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -996,7 +996,7 @@ define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i3
define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmb_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -1032,10 +1032,10 @@ define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) #0
define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmbk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -1080,9 +1080,9 @@ define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <3
define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi32_rmbkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -1130,7 +1130,7 @@ declare <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32>, <16 x i32>)
define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rr_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = sext <32 x i1> [[TMP3]] to <32 x i16>
@@ -1148,9 +1148,9 @@ define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) #0
define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rrk_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16>
@@ -1178,8 +1178,8 @@ define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <
define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rrkz_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = sext <32 x i1> [[TMP4]] to <32 x i16>
@@ -1206,7 +1206,7 @@ define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b,
define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rm_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -1236,10 +1236,10 @@ define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) #0 {
define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rmk_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
@@ -1278,9 +1278,9 @@ define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64
define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i64 %mask) #0 {
; CHECK-LABEL: @test_mask_packus_epi16_rmkz_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
@@ -1321,8 +1321,8 @@ declare <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16>, <32 x i16>)
define <32 x i16>@test_int_x86_avx512_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermt2var_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5>
@@ -1344,10 +1344,10 @@ define <32 x i16>@test_int_x86_avx512_vpermt2var_hi_512(<32 x i16> %x0, <32 x i1
define <32 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermt2var_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5>
; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X4:%.*]], <32 x i16> [[TMP2]])
@@ -1378,10 +1378,10 @@ define <32 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32
define <32 x i16>@test_int_x86_avx512_maskz_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5>
; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X4:%.*]], <32 x i16> [[TMP2]])
@@ -1415,8 +1415,8 @@ declare <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16>, <32 x i16>, <3
define <32 x i16>@test_int_x86_avx512_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[X1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[X1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X1]] to <32 x i5>
; CHECK-NEXT: [[TMP100:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X3:%.*]], <32 x i16> [[TMP2]])
@@ -1438,9 +1438,9 @@ define <32 x i16>@test_int_x86_avx512_vpermi2var_hi_512(<32 x i16> %x0, <32 x i1
define <32 x i16>@test_int_x86_avx512_mask_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[TMP3]] to <32 x i5>
; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X1:%.*]], <32 x i16> [[TMP2]])
@@ -1474,7 +1474,7 @@ declare <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8>, <64 x i8>)
define <64 x i8> @test_int_x86_avx512_pavg_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pavg_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1488,9 +1488,9 @@ define <64 x i8> @test_int_x86_avx512_pavg_b_512(<64 x i8> %x0, <64 x i8> %x1, <
define <64 x i8> @test_int_x86_avx512_mask_pavg_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]])
@@ -1516,7 +1516,7 @@ declare <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_int_x86_avx512_pavg_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pavg_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1530,9 +1530,9 @@ define <32 x i16> @test_int_x86_avx512_pavg_w_512(<32 x i16> %x0, <32 x i16> %x1
define <32 x i16> @test_int_x86_avx512_mask_pavg_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1558,7 +1558,7 @@ declare <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8>, <64 x i8>)
define <64 x i8>@test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pshuf_b_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP3]]
@@ -1573,9 +1573,9 @@ define <64 x i8>@test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1) #
define <64 x i8>@test_int_x86_avx512_pshuf_b_512_mask(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %mask) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pshuf_b_512_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP10]]
@@ -1600,8 +1600,8 @@ define <64 x i8>@test_int_x86_avx512_pshuf_b_512_mask(<64 x i8> %x0, <64 x i8> %
define <64 x i8>@test_int_x86_avx512_pshuf_b_512_maskz(<64 x i8> %x0, <64 x i8> %x1, i64 %mask) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pshuf_b_512_maskz(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP9]]
@@ -1628,7 +1628,7 @@ declare <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_int_x86_avx512_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmulhu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1642,9 +1642,9 @@ define <32 x i16> @test_int_x86_avx512_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %
define <32 x i16> @test_int_x86_avx512_mask_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmulhu_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1670,7 +1670,7 @@ declare <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_int_x86_avx512_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmulh_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1684,9 +1684,9 @@ define <32 x i16> @test_int_x86_avx512_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x
define <32 x i16> @test_int_x86_avx512_mask_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmulh_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1712,7 +1712,7 @@ declare <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_int_x86_avx512_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmulhr_sw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1726,9 +1726,9 @@ define <32 x i16> @test_int_x86_avx512_pmulhr_sw_512(<32 x i16> %x0, <32 x i16>
define <32 x i16> @test_int_x86_avx512_mask_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmulhr_sw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -1765,8 +1765,8 @@ define <32 x i8>@test_int_x86_avx512_pmov_wb_512(<32 x i16> %x0) #0 {
define <32 x i8>@test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_wb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8>
; CHECK-NEXT: [[TMP4:%.*]] = trunc <32 x i16> [[X0:%.*]] to <32 x i8>
@@ -1790,7 +1790,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8>
define <32 x i8>@test_int_x86_avx512_maskz_pmov_wb_512(<32 x i16> %x0, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmov_wb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8>
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X0:%.*]] to <32 x i8>
@@ -1816,8 +1816,8 @@ declare void @llvm.x86.avx512.mask.pmov.wb.mem.512(ptr %ptr, <32 x i16>, i32)
define void @test_int_x86_avx512_mask_pmov_wb_mem_512(ptr %ptr, <32 x i16> %x1, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_wb_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP2]] to i512
@@ -1853,7 +1853,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmovs.wb.512(<32 x i16>, <32 x i8>, i32)
define <32 x i8>@test_int_x86_avx512_pmovs_wb_512(<32 x i16> %x0, <32 x i8> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmovs_wb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8>
; CHECK-NEXT: [[TMP4:%.*]] = select <32 x i1> splat (i1 true), <32 x i8> [[TMP3]], <32 x i8> [[TMP2]]
@@ -1867,9 +1867,9 @@ define <32 x i8>@test_int_x86_avx512_pmovs_wb_512(<32 x i16> %x0, <32 x i8> %x1)
define <32 x i8>@test_int_x86_avx512_mask_pmovs_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_wb_512(
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8>
@@ -1890,7 +1890,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmovs_wb_512(<32 x i16> %x0, <32 x i8>
define <32 x i8>@test_int_x86_avx512_maskz_pmovs_wb_512(<32 x i16> %x0, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovs_wb_512(
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1>
@@ -1915,8 +1915,8 @@ declare void @llvm.x86.avx512.mask.pmovs.wb.mem.512(ptr %ptr, <32 x i16>, i32)
define void @test_int_x86_avx512_mask_pmovs_wb_mem_512(ptr %ptr, <32 x i16> %x1, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_wb_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP2]] to i512
@@ -1952,7 +1952,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmovus.wb.512(<32 x i16>, <32 x i8>, i32
define <32 x i8>@test_int_x86_avx512_pmovus_wb_512(<32 x i16> %x0, <32 x i8> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmovus_wb_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8>
; CHECK-NEXT: [[TMP4:%.*]] = select <32 x i1> splat (i1 true), <32 x i8> [[TMP3]], <32 x i8> [[TMP2]]
@@ -1966,9 +1966,9 @@ define <32 x i8>@test_int_x86_avx512_pmovus_wb_512(<32 x i16> %x0, <32 x i8> %x1
define <32 x i8>@test_int_x86_avx512_mask_pmovus_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_wb_512(
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8>
@@ -1989,7 +1989,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmovus_wb_512(<32 x i16> %x0, <32 x i8
define <32 x i8>@test_int_x86_avx512_maskz_pmovus_wb_512(<32 x i16> %x0, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovus_wb_512(
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1>
@@ -2014,8 +2014,8 @@ declare void @llvm.x86.avx512.mask.pmovus.wb.mem.512(ptr %ptr, <32 x i16>, i32)
define void @test_int_x86_avx512_mask_pmovus_wb_mem_512(ptr %ptr, <32 x i16> %x1, i32 %x2) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_wb_mem_512(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP2]] to i512
@@ -2051,7 +2051,7 @@ declare <32 x i16> @llvm.x86.avx512.pmaddubs.w.512(<64 x i8>, <64 x i8>)
define <32 x i16> @test_int_x86_avx512_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaddubs_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <64 x i8> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
@@ -2077,9 +2077,9 @@ define <32 x i16> @test_int_x86_avx512_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %
define <32 x i16> @test_int_x86_avx512_mask_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaddubs_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <64 x i8> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
@@ -2117,7 +2117,7 @@ declare <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16>, <32 x i16>)
define <16 x i32> @test_int_x86_avx512_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_pmaddw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP2]], zeroinitializer
@@ -2143,9 +2143,9 @@ define <16 x i32> @test_int_x86_avx512_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %
define <16 x i32> @test_int_x86_avx512_mask_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1, <16 x i32> %x2, i16 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_pmaddw_d_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <32 x i16> [[TMP2]], zeroinitializer
@@ -2183,9 +2183,9 @@ declare <32 x i16> @llvm.x86.avx512.dbpsadbw.512(<64 x i8>, <64 x i8>, i32)
define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_dbpsadbw_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <64 x i8> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0
@@ -2293,7 +2293,7 @@ define <32 x i16> @test_x86_avx512_psrlv_w_512_const() optsize #0 {
define <32 x i16>@test_int_x86_avx512_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_psrlv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2307,9 +2307,9 @@ define <32 x i16>@test_int_x86_avx512_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1)
define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psrlv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2333,8 +2333,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16>
define <32 x i16>@test_int_x86_avx512_maskz_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psrlv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2360,7 +2360,7 @@ declare <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16>, <32 x i16>)
define <32 x i16>@test_int_x86_avx512_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_psrav32_hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2374,9 +2374,9 @@ define <32 x i16>@test_int_x86_avx512_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1)
define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psrav32_hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2400,8 +2400,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16>
define <32 x i16>@test_int_x86_avx512_maskz_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psrav32_hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2436,7 +2436,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi_const(<32 x i16> %x0, <32
define <32 x i16>@test_int_x86_avx512_psllv32hi(<32 x i16> %x0, <32 x i16> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_psllv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2450,9 +2450,9 @@ define <32 x i16>@test_int_x86_avx512_psllv32hi(<32 x i16> %x0, <32 x i16> %x1)
define <32 x i16>@test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_psllv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2476,8 +2476,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16>
define <32 x i16>@test_int_x86_avx512_maskz_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_psllv32hi(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2503,7 +2503,7 @@ declare <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16>, <32 x i16>)
define <32 x i16>@test_int_x86_avx512_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1) #0 {
; CHECK-LABEL: @test_int_x86_avx512_permvar_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2517,9 +2517,9 @@ define <32 x i16>@test_int_x86_avx512_permvar_hi_512(<32 x i16> %x0, <32 x i16>
define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2543,8 +2543,8 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
define <32 x i16>@test_int_x86_avx512_maskz_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) #0 {
; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_hi_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]])
@@ -2568,7 +2568,7 @@ define <32 x i16>@test_int_x86_avx512_maskz_permvar_hi_512(<32 x i16> %x0, <32 x
define <32 x i16> @test_x86_avx512_psll_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psll_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -2587,9 +2587,9 @@ define <32 x i16> @test_x86_avx512_psll_w_512(<32 x i16> %a0, <8 x i16> %a1) #0
define <32 x i16> @test_x86_avx512_mask_psll_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psll_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -2618,8 +2618,8 @@ define <32 x i16> @test_x86_avx512_mask_psll_w_512(<32 x i16> %a0, <8 x i16> %a1
define <32 x i16> @test_x86_avx512_maskz_psll_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psll_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -2676,8 +2676,8 @@ define <32 x i16> @test_x86_avx512_pslli_w_512(<32 x i16> %a0) #0 {
define <32 x i16> @test_x86_avx512_mask_pslli_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_pslli_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer
@@ -2701,7 +2701,7 @@ define <32 x i16> @test_x86_avx512_mask_pslli_w_512(<32 x i16> %a0, <32 x i16> %
define <32 x i16> @test_x86_avx512_maskz_pslli_w_512(<32 x i16> %a0, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_pslli_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP3]], zeroinitializer
@@ -2728,7 +2728,7 @@ declare <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16>, i32) nounwind readno
define <32 x i16> @test_x86_avx512_psra_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psra_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -2747,9 +2747,9 @@ define <32 x i16> @test_x86_avx512_psra_w_512(<32 x i16> %a0, <8 x i16> %a1) #0
define <32 x i16> @test_x86_avx512_mask_psra_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psra_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -2778,8 +2778,8 @@ define <32 x i16> @test_x86_avx512_mask_psra_w_512(<32 x i16> %a0, <8 x i16> %a1
define <32 x i16> @test_x86_avx512_maskz_psra_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psra_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -2824,8 +2824,8 @@ define <32 x i16> @test_x86_avx512_psrai_w_512(<32 x i16> %a0) #0 {
define <32 x i16> @test_x86_avx512_mask_psrai_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrai_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer
@@ -2849,7 +2849,7 @@ define <32 x i16> @test_x86_avx512_mask_psrai_w_512(<32 x i16> %a0, <32 x i16> %
define <32 x i16> @test_x86_avx512_maskz_psrai_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrai_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP3]], zeroinitializer
@@ -2876,7 +2876,7 @@ declare <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16>, i32) nounwind readno
define <32 x i16> @test_x86_avx512_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx512_psrl_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -2895,9 +2895,9 @@ define <32 x i16> @test_x86_avx512_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1) #0
define <32 x i16> @test_x86_avx512_mask_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrl_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64
@@ -2926,8 +2926,8 @@ define <32 x i16> @test_x86_avx512_mask_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1
define <32 x i16> @test_x86_avx512_maskz_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrl_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64
@@ -2957,7 +2957,7 @@ declare <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16>, <8 x i16>) nounwind r
define <32 x i16> @test_x86_avx512_psrl_w_512_load(<32 x i16> %a0, ptr %p) #0 {
; CHECK-LABEL: @test_x86_avx512_psrl_w_512_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -3003,8 +3003,8 @@ define <32 x i16> @test_x86_avx512_psrli_w_512(<32 x i16> %a0) #0 {
define <32 x i16> @test_x86_avx512_mask_psrli_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_mask_psrli_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer
@@ -3028,7 +3028,7 @@ define <32 x i16> @test_x86_avx512_mask_psrli_w_512(<32 x i16> %a0, <32 x i16> %
define <32 x i16> @test_x86_avx512_maskz_psrli_w_512(<32 x i16> %a0, i32 %mask) #0 {
; CHECK-LABEL: @test_x86_avx512_maskz_psrli_w_512(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP3]], zeroinitializer
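Note on the pattern in the hunks above and below: every change in these MemorySanitizer tests replaces the old integer round-trip constant expression, inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 N) to ptr), with the byte-offset form getelementptr (i8, ptr @__msan_param_tls, i64 N). Both spellings denote the address N bytes past @__msan_param_tls, the thread-local block where MSan keeps argument shadow; the getelementptr spelling is what the constant folder now canonicalizes to, and unlike the ptrtoint/inttoptr round-trip it keeps the pointer's provenance visible to alias analysis. A minimal sketch of the equivalence follows (illustrative only, not part of this commit; the [100 x i64] type chosen for @__msan_param_tls is an assumption about the runtime's layout, and the function name is made up):

; equivalence-sketch.ll -- illustrative, not part of the patch
@__msan_param_tls = external thread_local global [100 x i64] ; assumed type

define <32 x i16> @shadow_at_offset_64() {
  ; old spelling: cast the global to i64, add the byte offset, cast back
  %old = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
  ; new spelling: byte-wise GEP over i8 on the same global
  %new = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
  ; both loads read the same 64 bytes, so OR-ing them returns that value unchanged
  %same = or <32 x i16> %old, %new
  ret <32 x i16> %same
}

Because only the spelling of the shadow-address constant changes, each hunk edits the load line and leaves the surrounding instrumentation (llvm.donothing, shadow propagation, intrinsic call) untouched.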
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll
index 69d4900..a79e293 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll
@@ -22,7 +22,7 @@ define <32 x half> @test_int_x86_avx512fp16_add_ph_512(<32 x half> %x1, <32 x ha
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_add_ph_512(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer
@@ -37,9 +37,9 @@ define <32 x half> @test_int_x86_avx512fp16_add_ph_512(<32 x half> %x1, <32 x ha
define <32 x half> @test_int_x86_avx512fp16_mask_add_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mask_add_ph_512(
; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
@@ -67,10 +67,10 @@ define <32 x half> @test_int_x86_avx512fp16_mask_add_ph_512(<32 x half> %src, <3
define <32 x half> @test_int_x86_avx512fp16_maskz_add_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_add_ph_512(
; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -123,10 +123,10 @@ define <32 x half> @test_int_x86_avx512fp16_maskz_add_ph_512(<32 x half> %src, <
define <32 x half> @test_int_x86_avx512fp16_add_ph_512_round(<32 x half> %x1, <32 x half> %x2, <32 x half> %src, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_add_ph_512_round(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], <32 x half> [[SRC:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -156,7 +156,7 @@ define <32 x half> @test_int_x86_avx512fp16_sub_ph_512(<32 x half> %x1, <32 x ha
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_sub_ph_512(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer
@@ -171,9 +171,9 @@ define <32 x half> @test_int_x86_avx512fp16_sub_ph_512(<32 x half> %x1, <32 x ha
define <32 x half> @test_int_x86_avx512fp16_mask_sub_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mask_sub_ph_512(
; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
@@ -201,10 +201,10 @@ define <32 x half> @test_int_x86_avx512fp16_mask_sub_ph_512(<32 x half> %src, <3
define <32 x half> @test_int_x86_avx512fp16_maskz_sub_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_sub_ph_512(
; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -257,10 +257,10 @@ define <32 x half> @test_int_x86_avx512fp16_maskz_sub_ph_512(<32 x half> %src, <
define <32 x half> @test_int_x86_avx512fp16_sub_ph_512_round(<32 x half> %x1, <32 x half> %x2, <32 x half> %src, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_sub_ph_512_round(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], <32 x half> [[SRC:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -290,7 +290,7 @@ define <32 x half> @test_int_x86_avx512fp16_mul_ph_512(<32 x half> %x1, <32 x ha
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mul_ph_512(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer
@@ -305,9 +305,9 @@ define <32 x half> @test_int_x86_avx512fp16_mul_ph_512(<32 x half> %x1, <32 x ha
define <32 x half> @test_int_x86_avx512fp16_mask_mul_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mask_mul_ph_512(
; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
@@ -335,10 +335,10 @@ define <32 x half> @test_int_x86_avx512fp16_mask_mul_ph_512(<32 x half> %src, <3
define <32 x half> @test_int_x86_avx512fp16_maskz_mul_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_mul_ph_512(
; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -391,10 +391,10 @@ define <32 x half> @test_int_x86_avx512fp16_maskz_mul_ph_512(<32 x half> %src, <
define <32 x half> @test_int_x86_avx512fp16_mul_ph_512_round(<32 x half> %x1, <32 x half> %x2, <32 x half> %src, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mul_ph_512_round(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], <32 x half> [[SRC:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -424,7 +424,7 @@ define <32 x half> @test_int_x86_avx512fp16_div_ph_512(<32 x half> %x1, <32 x ha
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_div_ph_512(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer
@@ -439,9 +439,9 @@ define <32 x half> @test_int_x86_avx512fp16_div_ph_512(<32 x half> %x1, <32 x ha
define <32 x half> @test_int_x86_avx512fp16_mask_div_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mask_div_ph_512(
; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
@@ -469,10 +469,10 @@ define <32 x half> @test_int_x86_avx512fp16_mask_div_ph_512(<32 x half> %src, <3
define <32 x half> @test_int_x86_avx512fp16_maskz_div_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_div_ph_512(
; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -525,10 +525,10 @@ define <32 x half> @test_int_x86_avx512fp16_maskz_div_ph_512(<32 x half> %src, <
define <32 x half> @test_int_x86_avx512fp16_div_ph_512_round(<32 x half> %x1, <32 x half> %x2, <32 x half> %src, i32 %msk, ptr %ptr) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_div_ph_512_round(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], <32 x half> [[SRC:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -558,7 +558,7 @@ define <32 x half> @test_min_ph(<32 x half> %x1, <32 x half> %x2) #0 {
; CHECK-LABEL: define <32 x half> @test_min_ph(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[_MSPROP]] to <32 x i1>
@@ -583,7 +583,7 @@ define <32 x half> @test_int_x86_avx512fp16_min_ph_512_sae(<32 x half> %x1, <32
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_min_ph_512_sae(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer
@@ -598,9 +598,9 @@ define <32 x half> @test_int_x86_avx512fp16_min_ph_512_sae(<32 x half> %x1, <32
define <32 x half> @test_int_x86_avx512fp16_maskz_min_ph_512_sae(<32 x half> %x1, <32 x half> %x2, i32 %msk) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_min_ph_512_sae(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -629,7 +629,7 @@ define <32 x half> @test_max_ph(<32 x half> %x1, <32 x half> %x2) #0 {
; CHECK-LABEL: define <32 x half> @test_max_ph(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[_MSPROP]] to <32 x i1>
@@ -654,7 +654,7 @@ define <32 x half> @test_int_x86_avx512fp16_max_ph_512_sae(<32 x half> %x1, <32
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_max_ph_512_sae(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer
@@ -669,9 +669,9 @@ define <32 x half> @test_int_x86_avx512fp16_max_ph_512_sae(<32 x half> %x1, <32
define <32 x half> @test_int_x86_avx512fp16_maskz_max_ph_512_sae(<32 x half> %x1, <32 x half> %x2, i32 %msk) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_max_ph_512_sae(
; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1>
@@ -700,8 +700,8 @@ define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd(<8 x half> %x0, <8 x do
; CHECK-LABEL: define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -727,8 +727,8 @@ define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_sae(<8 x half> %x0, <8
; CHECK-LABEL: define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_sae(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -754,7 +754,7 @@ define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_nomask(<8 x half> %x0,
; CHECK-LABEL: define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x double> [[X1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -778,8 +778,8 @@ define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_load(ptr %px0, <8 x dou
; CHECK-LABEL: define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_load(
; CHECK-SAME: ptr [[PX0:%.*]], <8 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -819,8 +819,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph(<8 x double> %x0, <8 x ha
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph(
; CHECK-SAME: <8 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -846,8 +846,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_r(<8 x double> %x0, <8 x
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_r(
; CHECK-SAME: <8 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -873,8 +873,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_load(ptr %px0, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_load(
; CHECK-SAME: ptr [[PX0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -914,9 +914,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round(<8 x half> %x0,
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -945,9 +945,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_r(<8 x half> %x0
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_r(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -976,8 +976,8 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_nomask(<8 x half
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1004,8 +1004,8 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_z(<8 x half> %x0
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_z(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1033,9 +1033,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round(<8 x half> %x0,
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round(
; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1064,9 +1064,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_r(<8 x half> %x0
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_r(
; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1095,8 +1095,8 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_nomask(<8 x half
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1123,8 +1123,8 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_z(<8 x half> %x0
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_z(
; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1152,9 +1152,9 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round(<4 x float> %x0
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round(
; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1183,9 +1183,9 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_r(<4 x float> %
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_r(
; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1214,8 +1214,8 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_nomask(<4 x flo
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_nomask(
; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], <4 x float> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1242,8 +1242,8 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_z(<4 x float> %
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_z(
; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1271,9 +1271,9 @@ define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round(<2 x double> %
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round(
; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1302,9 +1302,9 @@ define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_r(<2 x double>
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_r(
; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1333,8 +1333,8 @@ define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_nomask(<2 x do
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_nomask(
; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], <2 x double> [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1361,8 +1361,8 @@ define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_z(<2 x double>
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_z(
; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1404,8 +1404,8 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_ph2psx_512(<16 x half> %x0, <1
; CHECK-LABEL: define <16 x float> @test_int_x86_avx512_mask_cvt_ph2psx_512(
; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x float> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -1431,7 +1431,7 @@ define <16 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_512(<16 x half> %x0, i
; CHECK-LABEL: define <16 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_512(
; CHECK-SAME: <16 x half> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1468,8 +1468,8 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_ph2psx_512r(<16 x half> %x0, <
; CHECK-LABEL: define <16 x float> @test_int_x86_avx512_mask_cvt_ph2psx_512r(
; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x float> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -1495,7 +1495,7 @@ define <16 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_512r(<16 x half> %x0,
; CHECK-LABEL: define <16 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_512r(
; CHECK-SAME: <16 x half> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1534,8 +1534,8 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512(<16 x float> %x0, <1
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512(
; CHECK-SAME: <16 x float> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -1561,7 +1561,7 @@ define <16 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_512(<16 x float> %x0, i
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_512(
; CHECK-SAME: <16 x float> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -1584,8 +1584,8 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512r(<16 x float> %x0, <
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512r(
; CHECK-SAME: <16 x float> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -1622,3 +1622,6 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512r(<16 x float> %x0, <
}
attributes #0 = { sanitize_memory }
+;.
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
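
Every hunk in this patch applies the same mechanical rewrite to these autogenerated MemorySanitizer FileCheck expectations: a shadow load at byte offset N into the parameter TLS block, previously spelled as an inttoptr/ptrtoint constant expression, is now spelled as the equivalent byte-wise getelementptr constant. Below is a minimal sketch of that equivalence; the function and the array type declared for @__msan_param_tls are illustrative assumptions for the sketch, not taken from this patch.

; Sketch only: both constants name the address 32 bytes past the base of
; the MSan parameter-shadow TLS block.
@__msan_param_tls = external thread_local(initialexec) global [100 x i64]

define <16 x i16> @shadow_load_equivalence() {
  ; Old spelling: round-trip the base pointer through i64 arithmetic.
  %a = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
  ; New spelling: byte-offset getelementptr over an i8 base; same address.
  %b = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
  ; Loads from the same location yield the same shadow value.
  %same = or <16 x i16> %a, %b
  ret <16 x i16> %same
}
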
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll
index e67e5e7..c0ba3d5 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll
@@ -32,7 +32,7 @@ define <16 x half> @test_int_x86_avx512fp16_add_ph_256(<16 x half> %x1, <16 x ha
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_add_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fadd <16 x half> [[X1]], [[X2]]
@@ -46,11 +46,11 @@ define <16 x half> @test_int_x86_avx512fp16_add_ph_256(<16 x half> %x1, <16 x ha
define <16 x half> @test_int_x86_avx512fp16_mask_add_ph_256(<16 x half> %x1, <16 x half> %x2, <16 x half> %src, i16 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mask_add_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], <16 x half> [[SRC:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1>
@@ -103,9 +103,9 @@ define <16 x half> @test_int_x86_avx512fp16_mask_add_ph_256(<16 x half> %x1, <16
define <16 x half> @test_int_x86_avx512fp16_maskz_add_ph_256(<16 x half> %x1, <16 x half> %x2, i16 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_maskz_add_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1>
@@ -131,7 +131,7 @@ define <8 x half> @test_int_x86_avx512fp16_add_ph_128(<8 x half> %x1, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_add_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fadd <8 x half> [[X1]], [[X2]]
@@ -145,11 +145,11 @@ define <8 x half> @test_int_x86_avx512fp16_add_ph_128(<8 x half> %x1, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_add_ph_128(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_add_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -202,9 +202,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_add_ph_128(<8 x half> %x1, <8 x
define <8 x half> @test_int_x86_avx512fp16_maskz_add_ph_128(<8 x half> %x1, <8 x half> %x2, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_maskz_add_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -230,7 +230,7 @@ define <16 x half> @test_int_x86_avx512fp16_sub_ph_256(<16 x half> %x1, <16 x ha
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_sub_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fsub <16 x half> [[X1]], [[X2]]
@@ -244,11 +244,11 @@ define <16 x half> @test_int_x86_avx512fp16_sub_ph_256(<16 x half> %x1, <16 x ha
define <16 x half> @test_int_x86_avx512fp16_mask_sub_ph_256(<16 x half> %x1, <16 x half> %x2, <16 x half> %src, i16 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mask_sub_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], <16 x half> [[SRC:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1>
@@ -301,9 +301,9 @@ define <16 x half> @test_int_x86_avx512fp16_mask_sub_ph_256(<16 x half> %x1, <16
define <16 x half> @test_int_x86_avx512fp16_maskz_sub_ph_256(<16 x half> %x1, <16 x half> %x2, i16 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_maskz_sub_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1>
@@ -329,7 +329,7 @@ define <8 x half> @test_int_x86_avx512fp16_sub_ph_128(<8 x half> %x1, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_sub_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fsub <8 x half> [[X1]], [[X2]]
@@ -343,11 +343,11 @@ define <8 x half> @test_int_x86_avx512fp16_sub_ph_128(<8 x half> %x1, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_sub_ph_128(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_sub_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -400,9 +400,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_sub_ph_128(<8 x half> %x1, <8 x
define <8 x half> @test_int_x86_avx512fp16_maskz_sub_ph_128(<8 x half> %x1, <8 x half> %x2, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_maskz_sub_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -428,7 +428,7 @@ define <16 x half> @test_int_x86_avx512fp16_mul_ph_256(<16 x half> %x1, <16 x ha
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mul_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fmul <16 x half> [[X1]], [[X2]]
@@ -442,11 +442,11 @@ define <16 x half> @test_int_x86_avx512fp16_mul_ph_256(<16 x half> %x1, <16 x ha
define <16 x half> @test_int_x86_avx512fp16_mask_mul_ph_256(<16 x half> %x1, <16 x half> %x2, <16 x half> %src, i16 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mask_mul_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], <16 x half> [[SRC:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1>
@@ -499,9 +499,9 @@ define <16 x half> @test_int_x86_avx512fp16_mask_mul_ph_256(<16 x half> %x1, <16
define <16 x half> @test_int_x86_avx512fp16_maskz_mul_ph_256(<16 x half> %x1, <16 x half> %x2, i16 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_maskz_mul_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1>
@@ -527,7 +527,7 @@ define <8 x half> @test_int_x86_avx512fp16_mul_ph_128(<8 x half> %x1, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mul_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fmul <8 x half> [[X1]], [[X2]]
@@ -541,11 +541,11 @@ define <8 x half> @test_int_x86_avx512fp16_mul_ph_128(<8 x half> %x1, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_mul_ph_128(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_mul_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -598,9 +598,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_mul_ph_128(<8 x half> %x1, <8 x
define <8 x half> @test_int_x86_avx512fp16_maskz_mul_ph_128(<8 x half> %x1, <8 x half> %x2, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_maskz_mul_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -626,7 +626,7 @@ define <16 x half> @test_int_x86_avx512fp16_div_ph_256(<16 x half> %x1, <16 x ha
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_div_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fdiv <16 x half> [[X1]], [[X2]]
@@ -641,7 +641,7 @@ define <16 x half> @test_int_x86_avx512fp16_div_ph_256_fast(<16 x half> %x1, <16
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_div_ph_256_fast(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fdiv fast <16 x half> [[X1]], [[X2]]
@@ -655,11 +655,11 @@ define <16 x half> @test_int_x86_avx512fp16_div_ph_256_fast(<16 x half> %x1, <16
define <16 x half> @test_int_x86_avx512fp16_mask_div_ph_256(<16 x half> %x1, <16 x half> %x2, <16 x half> %src, i16 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mask_div_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], <16 x half> [[SRC:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1>
@@ -712,9 +712,9 @@ define <16 x half> @test_int_x86_avx512fp16_mask_div_ph_256(<16 x half> %x1, <16
define <16 x half> @test_int_x86_avx512fp16_maskz_div_ph_256(<16 x half> %x1, <16 x half> %x2, i16 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_maskz_div_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1>
@@ -740,7 +740,7 @@ define <8 x half> @test_int_x86_avx512fp16_div_ph_128(<8 x half> %x1, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_div_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fdiv <8 x half> [[X1]], [[X2]]
@@ -755,7 +755,7 @@ define <8 x half> @test_int_x86_avx512fp16_div_ph_128_fast(<8 x half> %x1, <8 x
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_div_ph_128_fast(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = fdiv fast <8 x half> [[X1]], [[X2]]
@@ -769,11 +769,11 @@ define <8 x half> @test_int_x86_avx512fp16_div_ph_128_fast(<8 x half> %x1, <8 x
define <8 x half> @test_int_x86_avx512fp16_mask_div_ph_128(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_div_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -826,9 +826,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_div_ph_128(<8 x half> %x1, <8 x
define <8 x half> @test_int_x86_avx512fp16_maskz_div_ph_128(<8 x half> %x1, <8 x half> %x2, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_maskz_div_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -854,7 +854,7 @@ define <16 x half> @test_min_ph_256(<16 x half> %x1, <16 x half> %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_min_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc <16 x i16> [[_MSPROP]] to <16 x i1>
@@ -879,7 +879,7 @@ define <16 x half> @test_max_ph_256(<16 x half> %x1, <16 x half> %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_max_ph_256(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc <16 x i16> [[_MSPROP]] to <16 x i1>
@@ -904,7 +904,7 @@ define <8 x half> @test_min_ph_128(<8 x half> %x1, <8 x half> %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_min_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i16> [[_MSPROP]] to <8 x i1>
@@ -929,7 +929,7 @@ define <8 x half> @test_max_ph_128(<8 x half> %x1, <8 x half> %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_max_ph_128(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i16> [[_MSPROP]] to <8 x i1>
@@ -957,7 +957,7 @@ define <8 x half> @test_max_ph_128_2(<8 x half> %x1, <8 x half> %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_max_ph_128_2(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES0:%.*]] = call <8 x half> @llvm.x86.avx512fp16.max.ph.128(<8 x half> [[X1]], <8 x half> [[X2]])
@@ -972,7 +972,7 @@ define <16 x half> @test_max_ph_256_2(<16 x half> %x1, <16 x half> %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_max_ph_256_2(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES0:%.*]] = call <16 x half> @llvm.x86.avx512fp16.max.ph.256(<16 x half> [[X1]], <16 x half> [[X2]])
@@ -990,7 +990,7 @@ define <8 x half> @test_min_ph_128_2(<8 x half> %x1, <8 x half> %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_min_ph_128_2(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES0:%.*]] = call <8 x half> @llvm.x86.avx512fp16.min.ph.128(<8 x half> [[X1]], <8 x half> [[X2]])
@@ -1005,7 +1005,7 @@ define <16 x half> @test_min_ph_256_2(<16 x half> %x1, <16 x half> %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_min_ph_256_2(
; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES0:%.*]] = call <16 x half> @llvm.x86.avx512fp16.min.ph.256(<16 x half> [[X1]], <16 x half> [[X2]])
@@ -1022,8 +1022,8 @@ define <4 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_256(<8 x half> %x0, <4
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1049,7 +1049,7 @@ define <4 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_256_nomask(<8 x half> %
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_256_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x double> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1075,8 +1075,8 @@ define <2 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_128(<8 x half> %x0, <2
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1102,7 +1102,7 @@ define <2 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_128_nomask(<8 x half> %
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_128_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1128,8 +1128,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_256(<4 x double> %x0, <8
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -1155,8 +1155,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_256_load(ptr %px0, <8 x h
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_256_load(
; CHECK-SAME: ptr [[PX0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1196,8 +1196,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_128(<2 x double> %x0, <8
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1223,8 +1223,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_128_load(ptr %px0, <8 x h
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_128_load(
; CHECK-SAME: ptr [[PX0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -1278,8 +1278,8 @@ define <4 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_128(<8 x half> %x0, <4 x i
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1305,7 +1305,7 @@ define <4 x i32> @test_int_x86_avx512_maskz_cvt_ph2udq_128(<8 x half> %x0, i8 %x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_cvt_ph2udq_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1344,8 +1344,8 @@ define <8 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_256(<8 x half> %x0, <8 x i
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1371,7 +1371,7 @@ define <8 x i32> @test_int_x86_avx512_maskz_cvt_ph2udq_256(<8 x half> %x0, i8 %x
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_cvt_ph2udq_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1410,8 +1410,8 @@ define <4 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_128(<8 x half> %x0, <4 x i
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1437,7 +1437,7 @@ define <4 x i32> @test_int_x86_avx512_maskz_cvtt_ph2dq_128(<8 x half> %x0, i8 %x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_cvtt_ph2dq_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1476,8 +1476,8 @@ define <8 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_256(<8 x half> %x0, <8 x i
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1503,7 +1503,7 @@ define <8 x i32> @test_int_x86_avx512_maskz_cvtt_ph2dq_256(<8 x half> %x0, i8 %x
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_cvtt_ph2dq_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1542,8 +1542,8 @@ define <4 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_128(<8 x half> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1569,7 +1569,7 @@ define <4 x i32> @test_int_x86_avx512_maskz_cvtt_ph2udq_128(<8 x half> %x0, i8 %
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_cvtt_ph2udq_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1608,8 +1608,8 @@ define <8 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_256(<8 x half> %x0, <8 x
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1635,7 +1635,7 @@ define <8 x i32> @test_int_x86_avx512_maskz_cvtt_ph2udq_256(<8 x half> %x0, i8 %
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_cvtt_ph2udq_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1674,8 +1674,8 @@ define <4 x float> @test_int_x86_avx512_mask_cvt_ph2psx_128(<8 x half> %x0, <4 x
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_cvt_ph2psx_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1701,7 +1701,7 @@ define <4 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_128(<8 x half> %x0, i8
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_128(
; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1740,8 +1740,8 @@ define <8 x float> @test_int_x86_avx512_mask_cvt_ph2psx_256(<8 x half> %x0, <8 x
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_cvt_ph2psx_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1767,7 +1767,7 @@ define <8 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_256(<8 x half> %x0, i8
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_256(
; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1792,8 +1792,8 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_ps2phx_128(<4 x float> %x0, <8 x
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_ps2phx_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1849,8 +1849,8 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_ps2phx_256(<8 x float> %x0, <8 x
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_ps2phx_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -1876,7 +1876,7 @@ define <8 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_256(<8 x float> %x0, i8
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1896,3 +1896,6 @@ define <8 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_256(<8 x float> %x0, i8
}
attributes #0 = { sanitize_memory }
+;.
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll
index 8723b10..e5d1af3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll
@@ -61,7 +61,7 @@ define i32 @test_x86_avx512fp16_ucomi_sh_lt(<8 x half> %a0, <8 x half> %a1) #0 {
; CHECK-LABEL: define i32 @test_x86_avx512fp16_ucomi_sh_lt(
; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -100,7 +100,7 @@ define <32 x half> @test_sqrt_ph_512_fast(<32 x half> %a0, <32 x half> %a1) #0 {
; CHECK-LABEL: define <32 x half> @test_sqrt_ph_512_fast(
; CHECK-SAME: <32 x half> [[A0:%.*]], <32 x half> [[A1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call fast <32 x half> @llvm.sqrt.v32f16(<32 x half> [[A0]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP2]], [[TMP1]]
@@ -145,8 +145,8 @@ define <32 x half> @test_mask_sqrt_ph_512(<32 x half> %a0, <32 x half> %passthru
; CHECK-LABEL: define <32 x half> @test_mask_sqrt_ph_512(
; CHECK-SAME: <32 x half> [[A0:%.*]], <32 x half> [[PASSTHRU:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x half> @llvm.sqrt.v32f16(<32 x half> [[A0]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP2]] to <32 x i1>
@@ -172,7 +172,7 @@ define <32 x half> @test_maskz_sqrt_ph_512(<32 x half> %a0, i32 %mask) #0 {
; CHECK-LABEL: define <32 x half> @test_maskz_sqrt_ph_512(
; CHECK-SAME: <32 x half> [[A0:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call <32 x half> @llvm.sqrt.v32f16(<32 x half> [[A0]])
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP2]] to <32 x i1>
@@ -219,8 +219,8 @@ define <32 x half> @test_mask_sqrt_round_ph_512(<32 x half> %a0, <32 x half> %pa
; CHECK-LABEL: define <32 x half> @test_mask_sqrt_round_ph_512(
; CHECK-SAME: <32 x half> [[A0:%.*]], <32 x half> [[PASSTHRU:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -253,7 +253,7 @@ define <32 x half> @test_maskz_sqrt_round_ph_512(<32 x half> %a0, i32 %mask) #0
; CHECK-LABEL: define <32 x half> @test_maskz_sqrt_round_ph_512(
; CHECK-SAME: <32 x half> [[A0:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <32 x i16> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
@@ -287,9 +287,9 @@ define <8 x half> @test_sqrt_sh(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2,
; CHECK-LABEL: define <8 x half> @test_sqrt_sh(
; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -318,7 +318,7 @@ define half @test_sqrt_sh2(half %a0, half %a1) #0 {
; CHECK-LABEL: define half @test_sqrt_sh2(
; CHECK-SAME: half [[A0:%.*]], half [[A1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call fast half @llvm.sqrt.f16(half [[A0]])
; CHECK-NEXT: [[_MSPROP:%.*]] = or i16 [[TMP2]], [[TMP1]]
@@ -350,9 +350,9 @@ define <8 x half> @test_sqrt_sh_r(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2
; CHECK-LABEL: define <8 x half> @test_sqrt_sh_r(
; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -381,8 +381,8 @@ define <8 x half> @test_sqrt_sh_nomask(<8 x half> %a0, <8 x half> %a1, <8 x half
; CHECK-LABEL: define <8 x half> @test_sqrt_sh_nomask(
; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -409,8 +409,8 @@ define <8 x half> @test_sqrt_sh_z(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2
; CHECK-LABEL: define <8 x half> @test_sqrt_sh_z(
; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -455,7 +455,7 @@ define <8 x half> @test_rsqrt_sh(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2)
; CHECK-LABEL: define <8 x half> @test_rsqrt_sh(
; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -481,7 +481,7 @@ define <8 x half> @test_rsqrt_sh(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2)
define <8 x half> @test_rsqrt_sh_load(<8 x half> %a0, ptr %a1ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_rsqrt_sh_load(
; CHECK-SAME: <8 x half> [[A0:%.*]], ptr [[A1PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -514,7 +514,7 @@ define <8 x half> @test_rsqrt_sh_maskz(<8 x half> %a0, i8 %mask) #0 {
; CHECK-LABEL: define <8 x half> @test_rsqrt_sh_maskz(
; CHECK-SAME: <8 x half> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -540,9 +540,9 @@ define <8 x half> @test_rsqrt_sh_mask(<8 x half> %a0, <8 x half> %b0, <8 x half>
; CHECK-LABEL: define <8 x half> @test_rsqrt_sh_mask(
; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[B0:%.*]], <8 x half> [[C0:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -675,9 +675,9 @@ declare <32 x half> @llvm.x86.avx512fp16.mask.rcp.ph.512(<32 x half>, <32 x half
define <32 x half> @test_rcp_ph_512(<32 x half> %a0, <32 x half> %a1, i32 %mask) #0 {
; CHECK-LABEL: define <32 x half> @test_rcp_ph_512(
; CHECK-SAME: <32 x half> [[A0:%.*]], <32 x half> [[A1:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[MASK]] to <32 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
@@ -725,7 +725,7 @@ define <8 x half> @test_rcp_sh(<8 x half> %a0) #0 {
define <8 x half> @test_rcp_sh_load(<8 x half> %a0, ptr %a1ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_rcp_sh_load(
; CHECK-SAME: <8 x half> [[A0:%.*]], ptr [[A1PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -764,8 +764,8 @@ define <32 x half>@test_int_x86_avx512_mask_reduce_ph_512(<32 x half> %x0, <32 x
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_reduce_ph_512(
; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X2:%.*]], i32 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -807,9 +807,9 @@ define <8 x half>@test_int_x86_avx512_mask_reduce_sh(<8 x half> %x0, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_reduce_sh(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -838,8 +838,8 @@ define <8 x half>@test_int_x86_avx512_mask_reduce_sh_nomask(<8 x half> %x0, <8 x
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_reduce_sh_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -867,9 +867,9 @@ declare <32 x half> @llvm.x86.avx512fp16.mask.rndscale.ph.512(<32 x half>, i32,
define <32 x half>@test_int_x86_avx512_mask_rndscale_ph_512(<32 x half> %x0, <32 x half> %x2, i32 %x3) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_rndscale_ph_512(
; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X2:%.*]], i32 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[X3]] to <32 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer
@@ -903,9 +903,9 @@ define <8 x half>@test_int_x86_avx512_mask_rndscale_sh(<8 x half> %x0, <8 x half
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_rndscale_sh(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -934,8 +934,8 @@ define <8 x half>@test_int_x86_avx512_mask_rndscale_sh_nomask(<8 x half> %x0, <8
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_rndscale_sh_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -964,8 +964,8 @@ define <32 x half>@test_int_x86_avx512_mask_getexp_ph_512(<32 x half> %x0, <32 x
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_getexp_ph_512(
; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X1:%.*]], i32 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -1004,9 +1004,9 @@ define <8 x half>@test_int_x86_avx512_mask_getexp_sh(<8 x half> %x0, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getexp_sh(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1035,8 +1035,8 @@ define <8 x half>@test_int_x86_avx512_mask_getexp_sh_nomask(<8 x half> %x0, <8 x
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getexp_sh_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1062,7 +1062,7 @@ define <8 x half>@test_int_x86_avx512_mask_getexp_sh_nomask(<8 x half> %x0, <8 x
define <8 x half>@test_int_x86_avx512_mask_getexp_sh_load(<8 x half> %x0, ptr %x1ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getexp_sh_load(
; CHECK-SAME: <8 x half> [[X0:%.*]], ptr [[X1PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -1097,8 +1097,8 @@ define <32 x half>@test_int_x86_avx512_mask_getmant_ph_512(<32 x half> %x0, <32
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_getmant_ph_512(
; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X2:%.*]], i32 [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
@@ -1140,9 +1140,9 @@ define <8 x half>@test_int_x86_avx512_mask_getmant_sh(<8 x half> %x0, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getmant_sh(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1171,8 +1171,8 @@ define <8 x half>@test_int_x86_avx512_mask_getmant_sh_nomask(<8 x half> %x0, <8
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getmant_sh_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1199,8 +1199,8 @@ define <8 x half>@test_int_x86_avx512_mask_getmant_sh_z(<8 x half> %x0, <8 x hal
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getmant_sh_z(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1227,10 +1227,10 @@ declare <32 x half> @llvm.x86.avx512fp16.mask.scalef.ph.512(<32 x half>, <32 x h
define <32 x half>@test_int_x86_avx512_mask_scalef_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3) #0 {
; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_scalef_ph_512(
; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[X3]] to <32 x i1>
@@ -1278,9 +1278,9 @@ define <8 x half>@test_int_x86_avx512_mask_scalef_sh(<8 x half> %x0, <8 x half>
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_scalef_sh(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1309,8 +1309,8 @@ define <8 x half>@test_int_x86_avx512_mask_scalef_sh_nomask(<8 x half> %x0, <8 x
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_scalef_sh_nomask(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1336,7 +1336,7 @@ define <8 x half>@test_int_x86_avx512_mask_scalef_sh_nomask(<8 x half> %x0, <8 x
define <8 x half>@test_int_x86_avx512_mask_scalef_sh_load(<8 x half> %x0, ptr %x1ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_scalef_sh_load(
; CHECK-SAME: <8 x half> [[X0:%.*]], ptr [[X1PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -1370,11 +1370,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.add.sh.round(<8 x half>, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_add_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_add_sh(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -1456,11 +1456,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.sub.sh.round(<8 x half>, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_sub_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_sub_sh(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -1542,11 +1542,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.mul.sh.round(<8 x half>, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_mul_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_mul_sh(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -1628,11 +1628,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.div.sh.round(<8 x half>, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_div_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_div_sh(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -1714,11 +1714,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.min.sh.round(<8 x half>, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_min_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_min_sh(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -1800,11 +1800,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.max.sh.round(<8 x half>, <8 x half>
define <8 x half> @test_int_x86_avx512fp16_mask_max_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_max_sh(
; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -1887,8 +1887,8 @@ define i8 @test_int_x86_avx512_mask_cmp_sh(<8 x half> %x0, <8 x half> %x1, i8 %x
; CHECK-LABEL: define i8 @test_int_x86_avx512_mask_cmp_sh(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X3:%.*]], i32 [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -1915,8 +1915,8 @@ define i8 @test_int_x86_avx512_mask_cmp_sh_all(<8 x half> %x0, <8 x half> %x1, i
; CHECK-LABEL: define i8 @test_int_x86_avx512_mask_cmp_sh_all(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X3:%.*]], i32 [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -2001,9 +2001,9 @@ declare <16 x half> @llvm.x86.avx512.sitofp.round.v16f16.v16i32(<16 x i32>, i32)
define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512(<16 x i32> %x0, <16 x half> %x1, i16 %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i16 [[X2]] to <16 x i1>
@@ -2035,9 +2035,9 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512(<16 x i32> %x0, <16 x
define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_r(<16 x i32> %x0, <16 x half> %x1, i16 %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_r(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i16 [[X2]] to <16 x i1>
@@ -2089,7 +2089,7 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_nomask(<16 x i32> %x0
define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_z(<16 x i32> %x0, i16 %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_z(
; CHECK-SAME: <16 x i32> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -2137,9 +2137,9 @@ declare <16 x half> @llvm.x86.avx512.uitofp.round.v16f16.v16i32(<16 x i32>, i32)
define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_r(<16 x i32> %x0, <16 x half> %x1, i16 %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_r(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i16 [[X2]] to <16 x i1>
@@ -2191,7 +2191,7 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_nomask(<16 x i32> %x
define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_z(<16 x i32> %x0, i16 %x2) #0 {
; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_z(
; CHECK-SAME: <16 x i32> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1>
@@ -2240,8 +2240,8 @@ define <16 x i32> @test_int_x86_avx512_mask_cvt_ph2dq_512(<16 x half> %x0, <16 x
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_mask_cvt_ph2dq_512(
; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x i32> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -2283,8 +2283,8 @@ define <16 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_512(<16 x half> %x0, <16
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_512(
; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x i32> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -2326,8 +2326,8 @@ define <16 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_512(<16 x half> %x0, <16
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_512(
; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x i32> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -2369,8 +2369,8 @@ define <16 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_512(<16 x half> %x0, <16
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_512(
; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x i32> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -2411,9 +2411,9 @@ declare <8 x half> @llvm.x86.avx512.sitofp.round.v8f16.v8i64(<8 x i64>, i32)
define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512(<8 x i64> %x0, <8 x half> %x1, i8 %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512(
; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i8 [[X2]] to <8 x i1>
@@ -2445,9 +2445,9 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512(<8 x i64> %x0, <8 x ha
define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_r(<8 x i64> %x0, <8 x half> %x1, i8 %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_r(
; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i8 [[X2]] to <8 x i1>
@@ -2499,7 +2499,7 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_nomask(<8 x i64> %x0,
define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_z(<8 x i64> %x0, i8 %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_z(
; CHECK-SAME: <8 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -2533,9 +2533,9 @@ declare <8 x half> @llvm.x86.avx512.uitofp.round.v8f16.v8i64(<8 x i64>, i32)
define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512(<8 x i64> %x0, <8 x half> %x1, i8 %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512(
; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i8 [[X2]] to <8 x i1>
@@ -2567,9 +2567,9 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512(<8 x i64> %x0, <8 x h
define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_r(<8 x i64> %x0, <8 x half> %x1, i8 %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_r(
; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
; CHECK-NEXT: [[MASK:%.*]] = bitcast i8 [[X2]] to <8 x i1>
@@ -2621,7 +2621,7 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_nomask(<8 x i64> %x0,
define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_z(<8 x i64> %x0, i8 %x2) #0 {
; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_z(
; CHECK-SAME: <8 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1>
@@ -2656,8 +2656,8 @@ define <8 x i64> @test_int_x86_avx512_mask_cvt_ph2qq_512(<8 x half> %x0, <8 x i6
; CHECK-LABEL: define <8 x i64> @test_int_x86_avx512_mask_cvt_ph2qq_512(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -2699,8 +2699,8 @@ define <8 x i64> @test_int_x86_avx512_mask_cvt_ph2uqq_512(<8 x half> %x0, <8 x i
; CHECK-LABEL: define <8 x i64> @test_int_x86_avx512_mask_cvt_ph2uqq_512(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -2742,8 +2742,8 @@ define <8 x i64> @test_int_x86_avx512_mask_cvtt_ph2uqq_512(<8 x half> %x0, <8 x
; CHECK-LABEL: define <8 x i64> @test_int_x86_avx512_mask_cvtt_ph2uqq_512(
; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -3051,7 +3051,7 @@ define <8 x half> @test_x86_avx512fp16_vcvtsi2sh(<8 x half> %arg0, i32 %arg1) #0
; CHECK-LABEL: define <8 x half> @test_x86_avx512fp16_vcvtsi2sh(
; CHECK-SAME: <8 x half> [[ARG0:%.*]], i32 [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -3089,7 +3089,7 @@ define <8 x half> @test_x86_avx512fp16_vcvtsi642sh(<8 x half> %arg0, i64 %arg1)
; CHECK-LABEL: define <8 x half> @test_x86_avx512fp16_vcvtsi642sh(
; CHECK-SAME: <8 x half> [[ARG0:%.*]], i64 [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -3127,7 +3127,7 @@ define <8 x half> @test_x86_avx512fp16_vcvtusi2sh(<8 x half> %arg0, i32 %arg1) #
; CHECK-LABEL: define <8 x half> @test_x86_avx512fp16_vcvtusi2sh(
; CHECK-SAME: <8 x half> [[ARG0:%.*]], i32 [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -3165,7 +3165,7 @@ define <8 x half> @test_x86_avx512fp16_vcvtusi642sh(<8 x half> %arg0, i64 %arg1)
; CHECK-LABEL: define <8 x half> @test_x86_avx512fp16_vcvtusi642sh(
; CHECK-SAME: <8 x half> [[ARG0:%.*]], i64 [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
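[Note on the pattern in the hunks above, and in all of avx512vl-intrinsics.ll below: every change is the same mechanical rewrite of the constant address of a shadow slot in the MSan parameter TLS block. The old expectations spelled the address as an integer round-trip, inttoptr(add(ptrtoint(@__msan_param_tls), N)); the new ones spell it as a byte-wise constant getelementptr, which denotes the same address while keeping the computation in pointer form, in line with LLVM's broader move away from ptrtoint/inttoptr constant expressions. A minimal sketch of the two spellings follows; the global's declaration here is assumed for illustration only and is not taken from this commit:

; Assumed declaration of the MSan parameter-TLS block (not part of this diff).
@__msan_param_tls = external thread_local global [100 x i64]

define i64 @shadow_at_offset_16() {
  ; New spelling: the shadow word 16 bytes into the parameter TLS block is
  ; read through a constant i8 GEP. The old expectations used the
  ; equivalent integer round-trip:
  ;   inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr)
  %s = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
  ret i64 %s
}

Both forms load from the same address, which is why each hunk changes only the spelling of the pointer operand and leaves the loaded type, offset, and alignment untouched.]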
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll
index d598142..f20d368e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll
@@ -71,9 +71,9 @@ define <2 x double> @test_mask_compress_pd_128(<2 x double> %data, <2 x double>
;
; CHECK-LABEL: define <2 x double> @test_mask_compress_pd_128(
; CHECK-SAME: <2 x double> [[DATA:%.*]], <2 x double> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -106,7 +106,7 @@ define <2 x double> @test_maskz_compress_pd_128(<2 x double> %data, i8 %mask) #0
;
; CHECK-LABEL: define <2 x double> @test_maskz_compress_pd_128(
; CHECK-SAME: <2 x double> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -137,7 +137,7 @@ define <2 x double> @test_compress_pd_128(<2 x double> %data, <2 x double> %data
; CHECK-LABEL: define <2 x double> @test_compress_pd_128(
; CHECK-SAME: <2 x double> [[DATA:%.*]], <2 x double> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -161,9 +161,9 @@ define <4 x float> @test_mask_compress_ps_128(<4 x float> %data, <4 x float> %pa
;
; CHECK-LABEL: define <4 x float> @test_mask_compress_ps_128(
; CHECK-SAME: <4 x float> [[DATA:%.*]], <4 x float> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -196,7 +196,7 @@ define <4 x float> @test_maskz_compress_ps_128(<4 x float> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x float> @test_maskz_compress_ps_128(
; CHECK-SAME: <4 x float> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -227,7 +227,7 @@ define <4 x float> @test_compress_ps_128(<4 x float> %data, <4 x float> %data2)
; CHECK-LABEL: define <4 x float> @test_compress_ps_128(
; CHECK-SAME: <4 x float> [[DATA:%.*]], <4 x float> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -251,9 +251,9 @@ define <2 x i64> @test_mask_compress_q_128(<2 x i64> %data, <2 x i64> %passthru,
;
; CHECK-LABEL: define <2 x i64> @test_mask_compress_q_128(
; CHECK-SAME: <2 x i64> [[DATA:%.*]], <2 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -286,7 +286,7 @@ define <2 x i64> @test_maskz_compress_q_128(<2 x i64> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <2 x i64> @test_maskz_compress_q_128(
; CHECK-SAME: <2 x i64> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -317,7 +317,7 @@ define <2 x i64> @test_compress_q_128(<2 x i64> %data, <2 x i64> %data2) #0 {
; CHECK-LABEL: define <2 x i64> @test_compress_q_128(
; CHECK-SAME: <2 x i64> [[DATA:%.*]], <2 x i64> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -341,9 +341,9 @@ define <4 x i32> @test_mask_compress_d_128(<4 x i32> %data, <4 x i32> %passthru,
;
; CHECK-LABEL: define <4 x i32> @test_mask_compress_d_128(
; CHECK-SAME: <4 x i32> [[DATA:%.*]], <4 x i32> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -376,7 +376,7 @@ define <4 x i32> @test_maskz_compress_d_128(<4 x i32> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x i32> @test_maskz_compress_d_128(
; CHECK-SAME: <4 x i32> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -407,7 +407,7 @@ define <4 x i32> @test_compress_d_128(<4 x i32> %data, <4 x i32> %data2) #0 {
; CHECK-LABEL: define <4 x i32> @test_compress_d_128(
; CHECK-SAME: <4 x i32> [[DATA:%.*]], <4 x i32> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -431,7 +431,7 @@ define <2 x double> @test_expand_pd_128(<2 x double> %data, <2 x double> %data2)
; CHECK-LABEL: define <2 x double> @test_expand_pd_128(
; CHECK-SAME: <2 x double> [[DATA:%.*]], <2 x double> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -455,9 +455,9 @@ define <2 x double> @test_mask_expand_pd_128(<2 x double> %data, <2 x double> %p
;
; CHECK-LABEL: define <2 x double> @test_mask_expand_pd_128(
; CHECK-SAME: <2 x double> [[DATA:%.*]], <2 x double> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -490,7 +490,7 @@ define <2 x double> @test_maskz_expand_pd_128(<2 x double> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <2 x double> @test_maskz_expand_pd_128(
; CHECK-SAME: <2 x double> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -521,7 +521,7 @@ define <4 x float> @test_expand_ps_128(<4 x float> %data, <4 x float> %data2) #0
; CHECK-LABEL: define <4 x float> @test_expand_ps_128(
; CHECK-SAME: <4 x float> [[DATA:%.*]], <4 x float> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -545,9 +545,9 @@ define <4 x float> @test_mask_expand_ps_128(<4 x float> %data, <4 x float> %pass
;
; CHECK-LABEL: define <4 x float> @test_mask_expand_ps_128(
; CHECK-SAME: <4 x float> [[DATA:%.*]], <4 x float> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -580,7 +580,7 @@ define <4 x float> @test_maskz_expand_ps_128(<4 x float> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x float> @test_maskz_expand_ps_128(
; CHECK-SAME: <4 x float> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -611,7 +611,7 @@ define <2 x i64> @test_expand_q_128(<2 x i64> %data, <2 x i64> %data2) #0 {
; CHECK-LABEL: define <2 x i64> @test_expand_q_128(
; CHECK-SAME: <2 x i64> [[DATA:%.*]], <2 x i64> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -635,9 +635,9 @@ define <2 x i64> @test_mask_expand_q_128(<2 x i64> %data, <2 x i64> %passthru, i
;
; CHECK-LABEL: define <2 x i64> @test_mask_expand_q_128(
; CHECK-SAME: <2 x i64> [[DATA:%.*]], <2 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -670,7 +670,7 @@ define <2 x i64> @test_maskz_expand_q_128(<2 x i64> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <2 x i64> @test_maskz_expand_q_128(
; CHECK-SAME: <2 x i64> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -701,7 +701,7 @@ define <4 x i32> @test_expand_d_128(<4 x i32> %data, <4 x i32> %data2) #0 {
; CHECK-LABEL: define <4 x i32> @test_expand_d_128(
; CHECK-SAME: <4 x i32> [[DATA:%.*]], <4 x i32> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -725,9 +725,9 @@ define <4 x i32> @test_mask_expand_d_128(<4 x i32> %data, <4 x i32> %passthru, i
;
; CHECK-LABEL: define <4 x i32> @test_mask_expand_d_128(
; CHECK-SAME: <4 x i32> [[DATA:%.*]], <4 x i32> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -760,7 +760,7 @@ define <4 x i32> @test_maskz_expand_d_128(<4 x i32> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x i32> @test_maskz_expand_d_128(
; CHECK-SAME: <4 x i32> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -791,9 +791,9 @@ define <4 x double> @test_mask_compress_pd_256(<4 x double> %data, <4 x double>
;
; CHECK-LABEL: define <4 x double> @test_mask_compress_pd_256(
; CHECK-SAME: <4 x double> [[DATA:%.*]], <4 x double> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -826,7 +826,7 @@ define <4 x double> @test_maskz_compress_pd_256(<4 x double> %data, i8 %mask) #0
;
; CHECK-LABEL: define <4 x double> @test_maskz_compress_pd_256(
; CHECK-SAME: <4 x double> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -857,7 +857,7 @@ define <4 x double> @test_compress_pd_256(<4 x double> %data, <4 x double> %data
; CHECK-LABEL: define <4 x double> @test_compress_pd_256(
; CHECK-SAME: <4 x double> [[DATA:%.*]], <4 x double> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -881,9 +881,9 @@ define <8 x float> @test_mask_compress_ps_256(<8 x float> %data, <8 x float> %pa
;
; CHECK-LABEL: define <8 x float> @test_mask_compress_ps_256(
; CHECK-SAME: <8 x float> [[DATA:%.*]], <8 x float> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -913,7 +913,7 @@ define <8 x float> @test_maskz_compress_ps_256(<8 x float> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <8 x float> @test_maskz_compress_ps_256(
; CHECK-SAME: <8 x float> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -941,7 +941,7 @@ define <8 x float> @test_compress_ps_256(<8 x float> %data, <8 x float> %data2)
; CHECK-LABEL: define <8 x float> @test_compress_ps_256(
; CHECK-SAME: <8 x float> [[DATA:%.*]], <8 x float> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -965,9 +965,9 @@ define <4 x i64> @test_mask_compress_q_256(<4 x i64> %data, <4 x i64> %passthru,
;
; CHECK-LABEL: define <4 x i64> @test_mask_compress_q_256(
; CHECK-SAME: <4 x i64> [[DATA:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -1000,7 +1000,7 @@ define <4 x i64> @test_maskz_compress_q_256(<4 x i64> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x i64> @test_maskz_compress_q_256(
; CHECK-SAME: <4 x i64> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -1031,7 +1031,7 @@ define <4 x i64> @test_compress_q_256(<4 x i64> %data, <4 x i64> %data2) #0 {
; CHECK-LABEL: define <4 x i64> @test_compress_q_256(
; CHECK-SAME: <4 x i64> [[DATA:%.*]], <4 x i64> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1055,9 +1055,9 @@ define <8 x i32> @test_mask_compress_d_256(<8 x i32> %data, <8 x i32> %passthru,
;
; CHECK-LABEL: define <8 x i32> @test_mask_compress_d_256(
; CHECK-SAME: <8 x i32> [[DATA:%.*]], <8 x i32> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -1087,7 +1087,7 @@ define <8 x i32> @test_maskz_compress_d_256(<8 x i32> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <8 x i32> @test_maskz_compress_d_256(
; CHECK-SAME: <8 x i32> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -1115,7 +1115,7 @@ define <8 x i32> @test_compress_d_256(<8 x i32> %data, <8 x i32> %data2) #0 {
; CHECK-LABEL: define <8 x i32> @test_compress_d_256(
; CHECK-SAME: <8 x i32> [[DATA:%.*]], <8 x i32> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1139,7 +1139,7 @@ define <4 x double> @test_expand_pd_256(<4 x double> %data, <4 x double> %data2)
; CHECK-LABEL: define <4 x double> @test_expand_pd_256(
; CHECK-SAME: <4 x double> [[DATA:%.*]], <4 x double> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1163,9 +1163,9 @@ define <4 x double> @test_mask_expand_pd_256(<4 x double> %data, <4 x double> %p
;
; CHECK-LABEL: define <4 x double> @test_mask_expand_pd_256(
; CHECK-SAME: <4 x double> [[DATA:%.*]], <4 x double> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -1198,7 +1198,7 @@ define <4 x double> @test_maskz_expand_pd_256(<4 x double> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x double> @test_maskz_expand_pd_256(
; CHECK-SAME: <4 x double> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -1229,7 +1229,7 @@ define <8 x float> @test_expand_ps_256(<8 x float> %data, <8 x float> %data2) #0
; CHECK-LABEL: define <8 x float> @test_expand_ps_256(
; CHECK-SAME: <8 x float> [[DATA:%.*]], <8 x float> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1253,9 +1253,9 @@ define <8 x float> @test_mask_expand_ps_256(<8 x float> %data, <8 x float> %pass
;
; CHECK-LABEL: define <8 x float> @test_mask_expand_ps_256(
; CHECK-SAME: <8 x float> [[DATA:%.*]], <8 x float> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -1285,7 +1285,7 @@ define <8 x float> @test_maskz_expand_ps_256(<8 x float> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <8 x float> @test_maskz_expand_ps_256(
; CHECK-SAME: <8 x float> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -1313,7 +1313,7 @@ define <4 x i64> @test_expand_q_256(<4 x i64> %data, <4 x i64> %data2) #0 {
; CHECK-LABEL: define <4 x i64> @test_expand_q_256(
; CHECK-SAME: <4 x i64> [[DATA:%.*]], <4 x i64> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1337,9 +1337,9 @@ define <4 x i64> @test_mask_expand_q_256(<4 x i64> %data, <4 x i64> %passthru, i
;
; CHECK-LABEL: define <4 x i64> @test_mask_expand_q_256(
; CHECK-SAME: <4 x i64> [[DATA:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -1372,7 +1372,7 @@ define <4 x i64> @test_maskz_expand_q_256(<4 x i64> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x i64> @test_maskz_expand_q_256(
; CHECK-SAME: <4 x i64> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -1403,7 +1403,7 @@ define <8 x i32> @test_expand_d_256(<8 x i32> %data, <8 x i32> %data2) #0 {
; CHECK-LABEL: define <8 x i32> @test_expand_d_256(
; CHECK-SAME: <8 x i32> [[DATA:%.*]], <8 x i32> [[DATA2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1427,9 +1427,9 @@ define <8 x i32> @test_mask_expand_d_256(<8 x i32> %data, <8 x i32> %passthru, i
;
; CHECK-LABEL: define <8 x i32> @test_mask_expand_d_256(
; CHECK-SAME: <8 x i32> [[DATA:%.*]], <8 x i32> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -1459,7 +1459,7 @@ define <8 x i32> @test_maskz_expand_d_256(<8 x i32> %data, i8 %mask) #0 {
;
; CHECK-LABEL: define <8 x i32> @test_maskz_expand_d_256(
; CHECK-SAME: <8 x i32> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -1487,7 +1487,7 @@ define i8 @test_cmpps_256(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: define i8 @test_cmpps_256(
; CHECK-SAME: <8 x float> [[A:%.*]], <8 x float> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1514,7 +1514,7 @@ define i8 @test_cmpps_128(<4 x float> %a, <4 x float> %b) #0 {
; CHECK-LABEL: define i8 @test_cmpps_128(
; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1543,7 +1543,7 @@ define i8 @test_cmppd_256(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: define i8 @test_cmppd_256(
; CHECK-SAME: <4 x double> [[A:%.*]], <4 x double> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1572,7 +1572,7 @@ define i8 @test_cmppd_128(<2 x double> %a, <2 x double> %b) #0 {
; CHECK-LABEL: define i8 @test_cmppd_128(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1602,8 +1602,8 @@ define <8 x float> @test_mm512_maskz_max_ps_256(<8 x float> %a0, <8 x float> %a1
; CHECK-LABEL: define <8 x float> @test_mm512_maskz_max_ps_256(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0]], <8 x float> [[A1]])
@@ -1630,9 +1630,9 @@ define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1,
; CHECK-LABEL: define <8 x float> @test_mm512_mask_max_ps_256(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[SRC:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0]], <8 x float> [[A1]])
@@ -1659,7 +1659,7 @@ define <8 x float> @test_mm512_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %
; CHECK-LABEL: define <8 x float> @test_mm512_max_ps_256(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0]], <8 x float> [[A1]])
@@ -1676,8 +1676,8 @@ define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1
; CHECK-LABEL: define <4 x float> @test_mm512_maskz_max_ps_128(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0]], <4 x float> [[A1]])
@@ -1707,9 +1707,9 @@ define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1,
; CHECK-LABEL: define <4 x float> @test_mm512_mask_max_ps_128(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[SRC:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0]], <4 x float> [[A1]])
@@ -1739,7 +1739,7 @@ define <4 x float> @test_mm512_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %
; CHECK-LABEL: define <4 x float> @test_mm512_max_ps_128(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0]], <4 x float> [[A1]])
@@ -1756,8 +1756,8 @@ define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1
; CHECK-LABEL: define <8 x float> @test_mm512_maskz_min_ps_256(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0]], <8 x float> [[A1]])
@@ -1784,9 +1784,9 @@ define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1,
; CHECK-LABEL: define <8 x float> @test_mm512_mask_min_ps_256(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[SRC:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0]], <8 x float> [[A1]])
@@ -1813,7 +1813,7 @@ define <8 x float> @test_mm512_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %
; CHECK-LABEL: define <8 x float> @test_mm512_min_ps_256(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0]], <8 x float> [[A1]])
@@ -1830,9 +1830,9 @@ define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1
; CHECK-LABEL: define <4 x float> @test_mm512_maskz_min_ps_128(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP11]], [[TMP12]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0]], <4 x float> [[A1]])
@@ -1865,10 +1865,10 @@ define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1,
; CHECK-LABEL: define <4 x float> @test_mm512_mask_min_ps_128(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[SRC:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP8]], [[TMP12]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0]], <4 x float> [[A1]])
@@ -1901,7 +1901,7 @@ define <4 x float> @test_mm512_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %
; CHECK-LABEL: define <4 x float> @test_mm512_min_ps_128(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0]], <4 x float> [[A1]])
@@ -1962,8 +1962,8 @@ define <4 x i32>@test_int_x86_avx512_vpermi2var_d_128(<4 x i32> %x0, <4 x i32> %
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpermi2var_d_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[TMP8]] to <4 x i2>
; CHECK-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP6]], <4 x i32> [[X1]], <4 x i32> [[TMP5]])
@@ -1987,9 +1987,9 @@ define <4 x i32>@test_int_x86_avx512_mask_vpermi2var_d_128(<4 x i32> %x0, <4 x i
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_vpermi2var_d_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = trunc <4 x i32> [[TMP3]] to <4 x i2>
; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP8]], <4 x i32> [[X1]], <4 x i32> [[TMP6]])
@@ -2024,8 +2024,8 @@ define <4 x i32>@test_int_x86_avx512_mask_vpermi2var_d_128(<4 x i32> %x0, <4 x i
define <4 x i32>@test_int_x86_avx512_vpermt2var_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) #0 {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpermt2var_d_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[TMP8]] to <4 x i2>
@@ -2049,10 +2049,10 @@ define <4 x i32>@test_int_x86_avx512_mask_vpermt2var_d_128(<4 x i32> %x0, <4 x i
;
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_vpermt2var_d_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = trunc <4 x i32> [[TMP3]] to <4 x i2>
; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP8]], <4 x i32> [[X0]], <4 x i32> [[TMP6]])
@@ -2088,10 +2088,10 @@ define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128(<4 x i32> %x0, <4 x
;
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_vpermt2var_d_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP14:%.*]] = trunc <4 x i32> [[TMP3]] to <4 x i2>
; CHECK-NEXT: [[TMP13:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP8]], <4 x i32> [[X0]], <4 x i32> [[TMP9]])
@@ -2129,8 +2129,8 @@ define <8 x i32>@test_int_x86_avx512_vpermi2var_d_256(<8 x i32> %x0, <8 x i32> %
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpermi2var_d_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i32> [[TMP8]] to <8 x i3>
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP6]], <8 x i32> [[X1]], <8 x i32> [[TMP5]])
@@ -2154,9 +2154,9 @@ define <8 x i32>@test_int_x86_avx512_mask_vpermi2var_d_256(<8 x i32> %x0, <8 x i
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_vpermi2var_d_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = trunc <8 x i32> [[TMP3]] to <8 x i3>
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP8]], <8 x i32> [[X1]], <8 x i32> [[TMP6]])
@@ -2188,8 +2188,8 @@ define <8 x i32>@test_int_x86_avx512_mask_vpermi2var_d_256(<8 x i32> %x0, <8 x i
define <8 x i32>@test_int_x86_avx512_ask_vpermt2var_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) #0 {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_ask_vpermt2var_d_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i32> [[TMP8]] to <8 x i3>
@@ -2213,10 +2213,10 @@ define <8 x i32>@test_int_x86_avx512_mask_vpermt2var_d_256(<8 x i32> %x0, <8 x i
;
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_vpermt2var_d_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = trunc <8 x i32> [[TMP3]] to <8 x i3>
; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP8]], <8 x i32> [[X0]], <8 x i32> [[TMP6]])
@@ -2249,10 +2249,10 @@ define <8 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_256(<8 x i32> %x0, <8 x
;
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_vpermt2var_d_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP14:%.*]] = trunc <8 x i32> [[TMP3]] to <8 x i3>
; CHECK-NEXT: [[TMP13:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP8]], <8 x i32> [[X0]], <8 x i32> [[TMP9]])
@@ -2287,8 +2287,8 @@ define <2 x double>@test_int_x86_avx512_vpermi2var_pd_128(<2 x double> %x0, <2 x
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_vpermi2var_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x double> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i64> [[TMP6]] to <2 x i1>
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <2 x i64> [[TMP9]] to <2 x double>
@@ -2315,9 +2315,9 @@ define <2 x double>@test_int_x86_avx512_mask_vpermi2var_pd_128(<2 x double> %x0,
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_vpermi2var_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP15:%.*]] = trunc <2 x i64> [[TMP13]] to <2 x i1>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <2 x i64> [[TMP11]] to <2 x double>
@@ -2362,8 +2362,8 @@ define <4 x double>@test_int_x86_avx512_vpermi2var_pd_256(<4 x double> %x0, <4 x
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_vpermi2var_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x double> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i64> [[TMP6]] to <4 x i2>
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i64> [[TMP9]] to <4 x double>
@@ -2390,9 +2390,9 @@ define <4 x double>@test_int_x86_avx512_mask_vpermi2var_pd_256(<4 x double> %x0,
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_vpermi2var_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP15:%.*]] = trunc <4 x i64> [[TMP13]] to <4 x i2>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i64> [[TMP11]] to <4 x double>
@@ -2437,8 +2437,8 @@ define <4 x float>@test_int_x86_avx512_vpermi2var_ps_128(<4 x float> %x0, <4 x i
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_vpermi2var_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x float> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[TMP6]] to <4 x i2>
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP9]] to <4 x float>
@@ -2465,9 +2465,9 @@ define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128(<4 x float> %x0, <
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_vpermi2var_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP15:%.*]] = trunc <4 x i32> [[TMP13]] to <4 x i2>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP11]] to <4 x float>
@@ -2510,10 +2510,10 @@ define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128_cast(<4 x float> %
;
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_vpermi2var_ps_128_cast(
; CHECK-SAME: <4 x float> [[X0:%.*]], <2 x i64> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP14:%.*]] = bitcast <2 x i64> [[TMP11]] to <4 x i32>
; CHECK-NEXT: [[X1CAST:%.*]] = bitcast <2 x i64> [[X1]] to <4 x i32>
@@ -2561,8 +2561,8 @@ define <8 x float>@test_int_x86_avx512_vpermi2var_ps_256(<8 x float> %x0, <8 x i
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_vpermi2var_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x float> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i32> [[TMP6]] to <8 x i3>
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP9]] to <8 x float>
@@ -2589,9 +2589,9 @@ define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256(<8 x float> %x0, <
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_vpermi2var_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP15:%.*]] = trunc <8 x i32> [[TMP13]] to <8 x i3>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <8 x i32> [[TMP11]] to <8 x float>
@@ -2633,8 +2633,8 @@ define <2 x i64>@test_int_x86_avx512_vpermi2var_q_128(<2 x i64> %x0, <2 x i64> %
; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_vpermi2var_q_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i64> [[TMP8]] to <2 x i1>
; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP6]], <2 x i64> [[X1]], <2 x i64> [[TMP5]])
@@ -2658,9 +2658,9 @@ define <2 x i64>@test_int_x86_avx512_mask_vpermi2var_q_128(<2 x i64> %x0, <2 x i
; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_mask_vpermi2var_q_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = trunc <2 x i64> [[TMP3]] to <2 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP8]], <2 x i64> [[X1]], <2 x i64> [[TMP6]])
@@ -2695,8 +2695,8 @@ define <2 x i64>@test_int_x86_avx512_mask_vpermi2var_q_128(<2 x i64> %x0, <2 x i
define <2 x i64>@test_int_x86_avx512_vpermt2var_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) #0 {
; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_vpermt2var_q_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i64> [[TMP8]] to <2 x i1>
@@ -2720,10 +2720,10 @@ define <2 x i64>@test_int_x86_avx512_mask_vpermt2var_q_128(<2 x i64> %x0, <2 x i
;
; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_mask_vpermt2var_q_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = trunc <2 x i64> [[TMP3]] to <2 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP8]], <2 x i64> [[X0]], <2 x i64> [[TMP6]])
@@ -2759,10 +2759,10 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_128(<2 x i64> %x0, <2 x
;
; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_maskz_vpermt2var_q_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP14:%.*]] = trunc <2 x i64> [[TMP3]] to <2 x i1>
; CHECK-NEXT: [[TMP13:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP8]], <2 x i64> [[X0]], <2 x i64> [[TMP9]])
@@ -2800,8 +2800,8 @@ define <4 x i64>@test_int_x86_avx512_vpermi2var_q_256(<4 x i64> %x0, <4 x i64> %
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_vpermi2var_q_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i64> [[TMP8]] to <4 x i2>
; CHECK-NEXT: [[TMP4:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP6]], <4 x i64> [[X1]], <4 x i64> [[TMP5]])
@@ -2825,9 +2825,9 @@ define <4 x i64>@test_int_x86_avx512_mask_vpermi2var_q_256(<4 x i64> %x0, <4 x i
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_mask_vpermi2var_q_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i2>
; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP8]], <4 x i64> [[X1]], <4 x i64> [[TMP6]])
@@ -2862,8 +2862,8 @@ define <4 x i64>@test_int_x86_avx512_mask_vpermi2var_q_256(<4 x i64> %x0, <4 x i
define <4 x i64>@test_int_x86_avx512_vpermt2var_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) #0 {
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_vpermt2var_q_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i64> [[TMP8]] to <4 x i2>
@@ -2887,10 +2887,10 @@ define <4 x i64>@test_int_x86_avx512_mask_vpermt2var_q_256(<4 x i64> %x0, <4 x i
;
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_mask_vpermt2var_q_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i2>
; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP8]], <4 x i64> [[X0]], <4 x i64> [[TMP6]])
@@ -2926,10 +2926,10 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_256(<4 x i64> %x0, <4 x
;
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_maskz_vpermt2var_q_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP14:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i2>
; CHECK-NEXT: [[TMP13:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP8]], <4 x i64> [[X0]], <4 x i64> [[TMP9]])
@@ -2967,8 +2967,8 @@ define <2 x double>@test_int_x86_avx512_scalef_pd_128(<2 x double> %x0, <2 x dou
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_scalef_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -2996,9 +2996,9 @@ define <2 x double>@test_int_x86_avx512_mask_scalef_pd_128(<2 x double> %x0, <2
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_scalef_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -3029,8 +3029,8 @@ define <4 x double>@test_int_x86_avx512_scalef_pd_256(<4 x double> %x0, <4 x dou
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_scalef_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -3058,9 +3058,9 @@ define <4 x double>@test_int_x86_avx512_mask_scalef_pd_256(<4 x double> %x0, <4
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_scalef_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -3091,8 +3091,8 @@ define <4 x float>@test_int_x86_avx512_scalef_ps_128(<4 x float> %x0, <4 x float
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_scalef_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -3120,9 +3120,9 @@ define <4 x float>@test_int_x86_avx512_mask_scalef_ps_128(<4 x float> %x0, <4 x
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_scalef_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -3153,8 +3153,8 @@ define <8 x float>@test_int_x86_avx512_scalef_ps_256(<8 x float> %x0, <8 x float
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_scalef_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -3182,9 +3182,9 @@ define <8 x float>@test_int_x86_avx512_mask_scalef_ps_256(<8 x float> %x0, <8 x
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_scalef_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -3216,8 +3216,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_128(<2 x i64> %x0, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmov_qb_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -3273,8 +3273,8 @@ define void @test_int_x86_avx512_mask_pmov_qb_mem_128(ptr %ptr, <2 x i64> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qb_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -3312,8 +3312,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_128(<2 x i64> %x0, <16 x i8>
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovs_qb_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -3369,8 +3369,8 @@ define void @test_int_x86_avx512_mask_pmovs_qb_mem_128(ptr %ptr, <2 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qb_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -3408,8 +3408,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_128(<2 x i64> %x0, <16 x i8>
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovus_qb_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -3465,8 +3465,8 @@ define void @test_int_x86_avx512_mask_pmovus_qb_mem_128(ptr %ptr, <2 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qb_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -3504,8 +3504,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_256(<4 x i64> %x0, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmov_qb_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -3561,8 +3561,8 @@ define void @test_int_x86_avx512_mask_pmov_qb_mem_256(ptr %ptr, <4 x i64> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qb_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -3600,8 +3600,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_256(<4 x i64> %x0, <16 x i8>
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovs_qb_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -3657,8 +3657,8 @@ define void @test_int_x86_avx512_mask_pmovs_qb_mem_256(ptr %ptr, <4 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qb_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -3696,8 +3696,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_256(<4 x i64> %x0, <16 x i8>
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovus_qb_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -3753,8 +3753,8 @@ define void @test_int_x86_avx512_mask_pmovus_qb_mem_256(ptr %ptr, <4 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qb_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -3792,8 +3792,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_128(<2 x i64> %x0, <8 x i16> %
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmov_qw_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -3849,8 +3849,8 @@ define void @test_int_x86_avx512_mask_pmov_qw_mem_128(ptr %ptr, <2 x i64> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qw_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -3888,8 +3888,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_128(<2 x i64> %x0, <8 x i16>
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovs_qw_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -3945,8 +3945,8 @@ define void @test_int_x86_avx512_mask_pmovs_qw_mem_128(ptr %ptr, <2 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qw_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -3984,8 +3984,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_128(<2 x i64> %x0, <8 x i16>
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovus_qw_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -4041,8 +4041,8 @@ define void @test_int_x86_avx512_mask_pmovus_qw_mem_128(ptr %ptr, <2 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qw_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -4080,8 +4080,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_256(<4 x i64> %x0, <8 x i16> %
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmov_qw_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -4137,8 +4137,8 @@ define void @test_int_x86_avx512_mask_pmov_qw_mem_256(ptr %ptr, <4 x i64> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qw_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -4176,8 +4176,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_256(<4 x i64> %x0, <8 x i16>
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovs_qw_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -4233,8 +4233,8 @@ define void @test_int_x86_avx512_mask_pmovs_qw_mem_256(ptr %ptr, <4 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qw_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -4272,8 +4272,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_256(<4 x i64> %x0, <8 x i16>
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovus_qw_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -4329,8 +4329,8 @@ define void @test_int_x86_avx512_mask_pmovus_qw_mem_256(ptr %ptr, <4 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qw_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -4368,8 +4368,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmov_qd_128(<2 x i64> %x0, <4 x i32> %
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmov_qd_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -4425,8 +4425,8 @@ define void @test_int_x86_avx512_mask_pmov_qd_mem_128(ptr %ptr, <2 x i64> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qd_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -4464,8 +4464,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovs_qd_128(<2 x i64> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmovs_qd_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -4521,8 +4521,8 @@ define void @test_int_x86_avx512_mask_pmovs_qd_mem_128(ptr %ptr, <2 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qd_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -4560,8 +4560,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovus_qd_128(<2 x i64> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmovus_qd_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -4617,8 +4617,8 @@ define void @test_int_x86_avx512_mask_pmovus_qd_mem_128(ptr %ptr, <2 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qd_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -4668,8 +4668,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmov_qd_256(<4 x i64> %x0, <4 x i32> %
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmov_qd_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <4 x i64> [[TMP6]] to <4 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[X0]] to <4 x i32>
@@ -4698,7 +4698,7 @@ define <4 x i32>@test_int_x86_avx512_maskz_pmov_qd_256(<4 x i64> %x0, i8 %x2) #0
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_pmov_qd_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[X0]] to <4 x i32>
@@ -4729,8 +4729,8 @@ define void @test_int_x86_avx512_mask_pmov_qd_mem_256(ptr %ptr, <4 x i64> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qd_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -4767,7 +4767,7 @@ define <4 x i32>@test_int_x86_avx512_pmovs_qd_256(<4 x i64> %x0, <4 x i32> %x1)
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_pmovs_qd_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -4792,8 +4792,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovs_qd_256(<4 x i64> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmovs_qd_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -4820,7 +4820,7 @@ define <4 x i32>@test_int_x86_avx512_maskz_pmovs_qd_256(<4 x i64> %x0, i8 %x2) #
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_pmovs_qd_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -4846,8 +4846,8 @@ define void @test_int_x86_avx512_mask_pmovs_qd_mem_256(ptr %ptr, <4 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qd_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -4884,7 +4884,7 @@ define <4 x i32>@test_int_x86_avx512_pmovus_qd_256(<4 x i64> %x0, <4 x i32> %x1)
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_pmovus_qd_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -4909,8 +4909,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovus_qd_256(<4 x i64> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmovus_qd_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -4937,7 +4937,7 @@ define <4 x i32>@test_int_x86_avx512_maskz_pmovus_qd_256(<4 x i64> %x0, i8 %x2)
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_pmovus_qd_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -4963,8 +4963,8 @@ define void @test_int_x86_avx512_mask_pmovus_qd_mem_256(ptr %ptr, <4 x i64> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qd_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -5002,8 +5002,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_db_128(<4 x i32> %x0, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmov_db_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -5059,8 +5059,8 @@ define void @test_int_x86_avx512_mask_pmov_db_mem_128(ptr %ptr, <4 x i32> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_db_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -5098,8 +5098,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_128(<4 x i32> %x0, <16 x i8>
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovs_db_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -5155,8 +5155,8 @@ define void @test_int_x86_avx512_mask_pmovs_db_mem_128(ptr %ptr, <4 x i32> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_db_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -5194,8 +5194,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_128(<4 x i32> %x0, <16 x i8>
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovus_db_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -5251,8 +5251,8 @@ define void @test_int_x86_avx512_mask_pmovus_db_mem_128(ptr %ptr, <4 x i32> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_db_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -5290,8 +5290,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_db_256(<8 x i32> %x0, <16 x i8> %
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmov_db_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -5347,8 +5347,8 @@ define void @test_int_x86_avx512_mask_pmov_db_mem_256(ptr %ptr, <8 x i32> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_db_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -5386,8 +5386,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_256(<8 x i32> %x0, <16 x i8>
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovs_db_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -5443,8 +5443,8 @@ define void @test_int_x86_avx512_mask_pmovs_db_mem_256(ptr %ptr, <8 x i32> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_db_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -5482,8 +5482,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_256(<8 x i32> %x0, <16 x i8>
; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovus_db_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -5539,8 +5539,8 @@ define void @test_int_x86_avx512_mask_pmovus_db_mem_256(ptr %ptr, <8 x i32> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_db_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -5578,8 +5578,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_dw_128(<4 x i32> %x0, <8 x i16> %
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmov_dw_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -5635,8 +5635,8 @@ define void @test_int_x86_avx512_mask_pmov_dw_mem_128(ptr %ptr, <4 x i32> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_dw_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -5674,8 +5674,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_dw_128(<4 x i32> %x0, <8 x i16>
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovs_dw_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -5731,8 +5731,8 @@ define void @test_int_x86_avx512_mask_pmovs_dw_mem_128(ptr %ptr, <4 x i32> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_dw_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -5770,8 +5770,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_dw_128(<4 x i32> %x0, <8 x i16>
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovus_dw_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -5827,8 +5827,8 @@ define void @test_int_x86_avx512_mask_pmovus_dw_mem_128(ptr %ptr, <4 x i32> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_dw_mem_128(
; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -5866,8 +5866,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_dw_256(<8 x i32> %x0, <8 x i16> %
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmov_dw_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -5923,8 +5923,8 @@ define void @test_int_x86_avx512_mask_pmov_dw_mem_256(ptr %ptr, <8 x i32> %x1, i
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_dw_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -5962,8 +5962,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_dw_256(<8 x i32> %x0, <8 x i16>
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovs_dw_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -6019,8 +6019,8 @@ define void @test_int_x86_avx512_mask_pmovs_dw_mem_256(ptr %ptr, <8 x i32> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_dw_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -6058,8 +6058,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_dw_256(<8 x i32> %x0, <8 x i16>
; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovus_dw_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -6115,8 +6115,8 @@ define void @test_int_x86_avx512_mask_pmovus_dw_mem_256(ptr %ptr, <8 x i32> %x1,
; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_dw_mem_256(
; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -6154,8 +6154,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_128(<2 x double> %x0, <4 x i
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2dq_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6195,7 +6195,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_pd2dq_128_zext(<2 x double> %x0, <4 x i
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_pd2dq_128_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6222,8 +6222,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_128_zext(<2 x double> %x0, <
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2dq_128_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6253,7 +6253,7 @@ define <4 x float>@test_int_x86_avx512_cvt_pd2ps(<2 x double> %x0, <4 x float> %
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_cvt_pd2ps(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x float> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6278,8 +6278,8 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_pd2ps(<2 x double> %x0, <4 x flo
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_cvt_pd2ps(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6305,7 +6305,7 @@ define <4 x float>@test_int_x86_avx512_cvt_pd2ps_zext(<2 x double> %x0, <4 x flo
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_cvt_pd2ps_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x float> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6332,8 +6332,8 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_pd2ps_zext(<2 x double> %x0, <4
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_cvt_pd2ps_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6363,7 +6363,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_pd2udq_128(<2 x double> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_pd2udq_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6388,8 +6388,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_128(<2 x double> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2udq_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6415,7 +6415,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_pd2udq_128_zext(<2 x double> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_pd2udq_128_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6442,8 +6442,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_128_zext(<2 x double> %x0,
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2udq_128_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6473,7 +6473,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_pd2udq_256(<4 x double> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_pd2udq_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -6498,8 +6498,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_256(<4 x double> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2udq_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -6527,7 +6527,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_ps2dq_128(<4 x float> %x0, <4 x i32> %x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_ps2dq_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6552,8 +6552,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_128(<4 x float> %x0, <4 x i3
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_ps2dq_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6581,7 +6581,7 @@ define <8 x i32>@test_int_x86_avx512_cvt_ps2dq_256(<8 x float> %x0, <8 x i32> %x
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_cvt_ps2dq_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -6606,8 +6606,8 @@ define <8 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_256(<8 x float> %x0, <8 x i3
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvt_ps2dq_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -6635,7 +6635,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_ps2udq_128(<4 x float> %x0, <4 x i32> %
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_ps2udq_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6660,8 +6660,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_128(<4 x float> %x0, <4 x i
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_ps2udq_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6689,7 +6689,7 @@ define <8 x i32>@test_int_x86_avx512_cvt_ps2udq_256(<8 x float> %x0, <8 x i32> %
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_cvt_ps2udq_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -6714,8 +6714,8 @@ define <8 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_256(<8 x float> %x0, <8 x i
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvt_ps2udq_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -6743,7 +6743,7 @@ define <4 x i32>@test_int_x86_avx512_ask_cvtt_pd2dq_128(<2 x double> %x0, <4 x i
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_ask_cvtt_pd2dq_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6768,8 +6768,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_128(<2 x double> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2dq_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6795,7 +6795,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_pd2dq_128_zext(<2 x double> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_pd2dq_128_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6822,8 +6822,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_128_zext(<2 x double> %x0,
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2dq_128_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6853,7 +6853,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_pd2udq_128(<2 x double> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_pd2udq_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6878,8 +6878,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_128(<2 x double> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2udq_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6905,7 +6905,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_pd2udq_128_zext(<2 x double> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_pd2udq_128_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -6932,8 +6932,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_128_zext(<2 x double> %x0,
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2udq_128_zext(
; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -6963,7 +6963,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_pd2udq_256(<4 x double> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_pd2udq_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -6988,8 +6988,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_256(<4 x double> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2udq_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -7017,7 +7017,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_ps2udq_128(<4 x float> %x0, <4 x i32>
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_ps2udq_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -7042,8 +7042,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_128(<4 x float> %x0, <4 x
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_ps2udq_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -7071,7 +7071,7 @@ define <8 x i32>@test_int_x86_avx512_cvtt_ps2udq_256(<8 x float> %x0, <8 x i32>
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_cvtt_ps2udq_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -7096,8 +7096,8 @@ define <8 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_256(<8 x float> %x0, <8 x
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvtt_ps2udq_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -7125,9 +7125,9 @@ define <2 x double>@test_int_x86_avx512_mask_rndscale_pd_128(<2 x double> %x0, <
;
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_rndscale_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[X3]] to i2
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i2 [[TMP4]] to <2 x i1>
@@ -7162,9 +7162,9 @@ define <4 x double>@test_int_x86_avx512_mask_rndscale_pd_256(<4 x double> %x0, <
;
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_rndscale_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[X3]] to i4
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
@@ -7199,9 +7199,9 @@ define <4 x float>@test_int_x86_avx512_mask_rndscale_ps_128(<4 x float> %x0, <4
;
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_rndscale_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[X3]] to i4
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
@@ -7236,9 +7236,9 @@ define <8 x float>@test_int_x86_avx512_mask_rndscale_ps_256(<8 x float> %x0, <8
;
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_rndscale_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[X3]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
@@ -7273,8 +7273,8 @@ define <2 x double>@test_int_x86_avx512_mask_getmant_pd_128(<2 x double> %x0, <2
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_getmant_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -7330,8 +7330,8 @@ define <4 x double>@test_int_x86_avx512_mask_getmant_pd_256(<4 x double> %x0, <4
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_getmant_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -7374,8 +7374,8 @@ define <4 x float>@test_int_x86_avx512_mask_getmant_ps_128(<4 x float> %x0, <4 x
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_getmant_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -7418,8 +7418,8 @@ define <8 x float>@test_int_x86_avx512_mask_getmant_ps_256(<8 x float> %x0, <8 x
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_getmant_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -7461,8 +7461,8 @@ define <4 x i32>@test_int_x86_avx512_pternlog_d_128(<4 x i32> %x0, <4 x i32> %x1
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_pternlog_d_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP7]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -7490,9 +7490,9 @@ define <4 x i32>@test_int_x86_avx512_mask_pternlog_d_128(<4 x i32> %x0, <4 x i32
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pternlog_d_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i32> [[TMP8]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP12]], 0
@@ -7535,9 +7535,9 @@ define <4 x i32>@test_int_x86_avx512_maskz_pternlog_d_128(<4 x i32> %x0, <4 x i3
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_pternlog_d_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[TMP8]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP13]], 0
@@ -7579,8 +7579,8 @@ define <8 x i32>@test_int_x86_avx512_pternlog_d_256(<8 x i32> %x0, <8 x i32> %x1
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_pternlog_d_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP7]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -7608,9 +7608,9 @@ define <8 x i32>@test_int_x86_avx512_mask_pternlog_d_256(<8 x i32> %x0, <8 x i32
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_pternlog_d_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i32> [[TMP8]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP12]], 0
@@ -7650,9 +7650,9 @@ define <8 x i32>@test_int_x86_avx512_maskz_pternlog_d_256(<8 x i32> %x0, <8 x i3
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_pternlog_d_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = bitcast <8 x i32> [[TMP8]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP13]], 0
@@ -7691,8 +7691,8 @@ define <2 x i64>@test_int_x86_avx512_pternlog_q_128(<2 x i64> %x0, <2 x i64> %x1
; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_pternlog_q_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP7]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
@@ -7720,9 +7720,9 @@ define <2 x i64>@test_int_x86_avx512_mask_pternlog_q_128(<2 x i64> %x0, <2 x i64
; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_mask_pternlog_q_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <2 x i64> [[TMP8]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP12]], 0
@@ -7763,9 +7763,9 @@ define <2 x i64>@test_int_x86_avx512_maskz_pternlog_q_128(<2 x i64> %x0, <2 x i6
; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_maskz_pternlog_q_128(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = bitcast <2 x i64> [[TMP8]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP13]], 0
@@ -7807,8 +7807,8 @@ define <4 x i64>@test_int_x86_avx512_pternlog_q_256(<4 x i64> %x0, <4 x i64> %x1
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_pternlog_q_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP7]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
@@ -7836,9 +7836,9 @@ define <4 x i64>@test_int_x86_avx512_mask_pternlog_q_256(<4 x i64> %x0, <4 x i64
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_mask_pternlog_q_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i64> [[TMP8]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP12]], 0
@@ -7879,9 +7879,9 @@ define <4 x i64>@test_int_x86_avx512_maskz_pternlog_q_256(<4 x i64> %x0, <4 x i6
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_maskz_pternlog_q_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i64> [[TMP8]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP13]], 0
@@ -7922,8 +7922,8 @@ define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0, i8 %mask, <8 x i16> %s
; CHECK-LABEL: define <8 x i16> @test_x86_vcvtps2ph_128(
; CHECK-SAME: <4 x float> [[A0:%.*]], i8 [[MASK:%.*]], <8 x i16> [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP4]], zeroinitializer
@@ -7980,8 +7980,8 @@ define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0, i8 %mask, <8 x i16> %s
; CHECK-LABEL: define <8 x i16> @test_x86_vcvtps2ph_256(
; CHECK-SAME: <8 x float> [[A0:%.*]], i8 [[MASK:%.*]], <8 x i16> [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i16>
@@ -8046,7 +8046,7 @@ define <8 x float> @test_rsqrt_ps_256_rrkz(<8 x float> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: define <8 x float> @test_rsqrt_ps_256_rrkz(
; CHECK-SAME: <8 x float> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -8071,9 +8071,9 @@ define <8 x float> @test_rsqrt_ps_256_rrk(<8 x float> %a0, <8 x float> %a1, i8 %
;
; CHECK-LABEL: define <8 x float> @test_rsqrt_ps_256_rrk(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
@@ -8113,7 +8113,7 @@ define <4 x float> @test_rsqrt_ps_128_rrkz(<4 x float> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x float> @test_rsqrt_ps_128_rrkz(
; CHECK-SAME: <4 x float> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i4
@@ -8139,9 +8139,9 @@ define <4 x float> @test_rsqrt_ps_128_rrk(<4 x float> %a0, <4 x float> %a1, i8 %
;
; CHECK-LABEL: define <4 x float> @test_rsqrt_ps_128_rrk(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i4
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
@@ -8185,7 +8185,7 @@ define <8 x float> @test_rcp_ps_256_rrkz(<8 x float> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: define <8 x float> @test_rcp_ps_256_rrkz(
; CHECK-SAME: <8 x float> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
@@ -8210,9 +8210,9 @@ define <8 x float> @test_rcp_ps_256_rrk(<8 x float> %a0, <8 x float> %a1, i8 %ma
;
; CHECK-LABEL: define <8 x float> @test_rcp_ps_256_rrk(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
@@ -8252,7 +8252,7 @@ define <4 x float> @test_rcp_ps_128_rrkz(<4 x float> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x float> @test_rcp_ps_128_rrkz(
; CHECK-SAME: <4 x float> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i4
@@ -8278,9 +8278,9 @@ define <4 x float> @test_rcp_ps_128_rrk(<4 x float> %a0, <4 x float> %a1, i8 %ma
;
; CHECK-LABEL: define <4 x float> @test_rcp_ps_128_rrk(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i4
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
@@ -8324,7 +8324,7 @@ define <4 x double> @test_rsqrt_pd_256_rrkz(<4 x double> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x double> @test_rsqrt_pd_256_rrkz(
; CHECK-SAME: <4 x double> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i4
@@ -8350,9 +8350,9 @@ define <4 x double> @test_rsqrt_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i
;
; CHECK-LABEL: define <4 x double> @test_rsqrt_pd_256_rrk(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i4
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
@@ -8393,7 +8393,7 @@ define <2 x double> @test_rsqrt_pd_128_rrkz(<2 x double> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: define <2 x double> @test_rsqrt_pd_128_rrkz(
; CHECK-SAME: <2 x double> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i2
@@ -8419,9 +8419,9 @@ define <2 x double> @test_rsqrt_pd_128_rrk(<2 x double> %a0, <2 x double> %a1, i
;
; CHECK-LABEL: define <2 x double> @test_rsqrt_pd_128_rrk(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i2
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i2 [[TMP4]] to <2 x i1>
@@ -8465,7 +8465,7 @@ define <4 x double> @test_rcp_pd_256_rrkz(<4 x double> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: define <4 x double> @test_rcp_pd_256_rrkz(
; CHECK-SAME: <4 x double> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i4
@@ -8491,9 +8491,9 @@ define <4 x double> @test_rcp_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i8
;
; CHECK-LABEL: define <4 x double> @test_rcp_pd_256_rrk(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i4
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
@@ -8534,7 +8534,7 @@ define <2 x double> @test_rcp_pd_128_rrkz(<2 x double> %a0, i8 %mask) #0 {
;
; CHECK-LABEL: define <2 x double> @test_rcp_pd_128_rrkz(
; CHECK-SAME: <2 x double> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i2
@@ -8560,9 +8560,9 @@ define <2 x double> @test_rcp_pd_128_rrk(<2 x double> %a0, <2 x double> %a1, i8
;
; CHECK-LABEL: define <2 x double> @test_rcp_pd_128_rrk(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i2
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i2 [[TMP4]] to <2 x i1>
@@ -8592,7 +8592,7 @@ define <4 x double>@test_int_x86_avx512_permvar_df_256(<4 x double> %x0, <4 x i6
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_permvar_df_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x double> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -8617,9 +8617,9 @@ define <4 x double>@test_int_x86_avx512_mask_permvar_df_256(<4 x double> %x0, <4
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_permvar_df_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP14:%.*]] = bitcast <4 x i64> [[TMP8]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP14]], 0
@@ -8659,8 +8659,8 @@ define <4 x double>@test_int_x86_avx512_maskz_permvar_df_256(<4 x double> %x0, <
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_maskz_permvar_df_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i64> [[TMP10]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP13]], 0
@@ -8700,7 +8700,7 @@ define <4 x i64>@test_int_x86_avx512_permvar_di_256(<4 x i64> %x0, <4 x i64> %x1
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_permvar_di_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> [[X0]], <4 x i64> [[X1]])
@@ -8716,9 +8716,9 @@ define <4 x i64>@test_int_x86_avx512_mask_permvar_di_256(<4 x i64> %x0, <4 x i64
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_mask_permvar_di_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP5]], [[TMP9]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> [[X0]], <4 x i64> [[X1]])
@@ -8747,8 +8747,8 @@ define <4 x i64>@test_int_x86_avx512_maskz_permvar_di_256(<4 x i64> %x0, <4 x i6
; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_maskz_permvar_di_256(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> [[X0]], <4 x i64> [[X1]])
@@ -8779,9 +8779,9 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_pd_128(<2 x double> %x0, <
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_fixupimm_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -8846,9 +8846,9 @@ define <2 x double>@test_int_x86_avx512_maskz_fixupimm_pd_128(<2 x double> %x0,
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_maskz_fixupimm_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -8898,9 +8898,9 @@ define <4 x double>@test_int_x86_avx512_mask_fixupimm_pd_256(<4 x double> %x0, <
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_fixupimm_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -8965,9 +8965,9 @@ define <4 x double>@test_int_x86_avx512_maskz_fixupimm_pd_256(<4 x double> %x0,
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_maskz_fixupimm_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -9032,9 +9032,9 @@ define <4 x float>@test_int_x86_avx512_mask_fixupimm_ps_128(<4 x float> %x0, <4
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_fixupimm_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -9099,9 +9099,9 @@ define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ps_128(<4 x float> %x0, <4
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_maskz_fixupimm_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -9166,9 +9166,9 @@ define <8 x float>@test_int_x86_avx512_mask_fixupimm_ps_256(<8 x float> %x0, <8
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_fixupimm_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -9233,9 +9233,9 @@ define <8 x float>@test_int_x86_avx512_maskz_fixupimm_ps_256(<8 x float> %x0, <8
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_maskz_fixupimm_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -9297,7 +9297,7 @@ define <2 x i64> @test_x86_avx512_psra_q_128(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: define <2 x i64> @test_x86_avx512_psra_q_128(
; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -9318,10 +9318,10 @@ define <2 x i64> @test_x86_avx512_mask_psra_q_128(<2 x i64> %a0, <2 x i64> %a1,
; CHECK-LABEL: define <2 x i64> @test_x86_avx512_mask_psra_q_128(
; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], <2 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP7:%.*]] = trunc i128 [[TMP6]] to i64
@@ -9358,9 +9358,9 @@ define <2 x i64> @test_x86_avx512_maskz_psra_q_128(<2 x i64> %a0, <2 x i64> %a1,
; CHECK-LABEL: define <2 x i64> @test_x86_avx512_maskz_psra_q_128(
; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP17:%.*]] = trunc i128 [[TMP16]] to i64
@@ -9399,7 +9399,7 @@ define <4 x i64> @test_x86_avx512_psra_q_256(<4 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: define <4 x i64> @test_x86_avx512_psra_q_256(
; CHECK-SAME: <4 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -9420,10 +9420,10 @@ define <4 x i64> @test_x86_avx512_mask_psra_q_256(<4 x i64> %a0, <2 x i64> %a1,
; CHECK-LABEL: define <4 x i64> @test_x86_avx512_mask_psra_q_256(
; CHECK-SAME: <4 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP7:%.*]] = trunc i128 [[TMP6]] to i64
@@ -9460,9 +9460,9 @@ define <4 x i64> @test_x86_avx512_maskz_psra_q_256(<4 x i64> %a0, <2 x i64> %a1,
; CHECK-LABEL: define <4 x i64> @test_x86_avx512_maskz_psra_q_256(
; CHECK-SAME: <4 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP17:%.*]] = trunc i128 [[TMP16]] to i64
@@ -9516,9 +9516,9 @@ define <2 x i64> @test_x86_avx512_mask_psrai_q_128(<2 x i64> %a0, <2 x i64> %pas
; CHECK-LABEL: define <2 x i64> @test_x86_avx512_mask_psrai_q_128(
; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx512.psrai.q.128(<2 x i64> [[TMP5]], i32 7)
; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
@@ -9550,8 +9550,8 @@ define <2 x i64> @test_x86_avx512_maskz_psrai_q_128(<2 x i64> %a0, i8 %mask, i8
; CHECK-LABEL: define <2 x i64> @test_x86_avx512_maskz_psrai_q_128(
; CHECK-SAME: <2 x i64> [[A0:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx512.psrai.q.128(<2 x i64> [[TMP9]], i32 7)
; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
@@ -9600,9 +9600,9 @@ define <4 x i64> @test_x86_avx512_mask_psrai_q_256(<4 x i64> %a0, <4 x i64> %pas
; CHECK-LABEL: define <4 x i64> @test_x86_avx512_mask_psrai_q_256(
; CHECK-SAME: <4 x i64> [[A0:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.psrai.q.256(<4 x i64> [[TMP5]], i32 7)
; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
@@ -9634,8 +9634,8 @@ define <4 x i64> @test_x86_avx512_maskz_psrai_q_256(<4 x i64> %a0, i8 %mask, i8
; CHECK-LABEL: define <4 x i64> @test_x86_avx512_maskz_psrai_q_256(
; CHECK-SAME: <4 x i64> [[A0:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.psrai.q.256(<4 x i64> [[TMP9]], i32 7)
; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
@@ -9668,7 +9668,7 @@ define <2 x i64> @test_x86_avx512_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: define <2 x i64> @test_x86_avx512_psrav_q_128(
; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
@@ -9687,10 +9687,10 @@ define <2 x i64> @test_x86_avx512_mask_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1,
; CHECK-LABEL: define <2 x i64> @test_x86_avx512_mask_psrav_q_128(
; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = sext <2 x i1> [[TMP6]] to <2 x i64>
@@ -9725,9 +9725,9 @@ define <2 x i64> @test_x86_avx512_maskz_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1
; CHECK-LABEL: define <2 x i64> @test_x86_avx512_maskz_psrav_q_128(
; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = sext <2 x i1> [[TMP13]] to <2 x i64>
@@ -9763,7 +9763,7 @@ define <4 x i64> @test_x86_avx512_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: define <4 x i64> @test_x86_avx512_psrav_q_256(
; CHECK-SAME: <4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64>
@@ -9782,10 +9782,10 @@ define <4 x i64> @test_x86_avx512_mask_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1,
; CHECK-LABEL: define <4 x i64> @test_x86_avx512_mask_psrav_q_256(
; CHECK-SAME: <4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]], <4 x i64> [[A2:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = sext <4 x i1> [[TMP6]] to <4 x i64>
@@ -9820,9 +9820,9 @@ define <4 x i64> @test_x86_avx512_maskz_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1
; CHECK-LABEL: define <4 x i64> @test_x86_avx512_maskz_psrav_q_256(
; CHECK-SAME: <4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = sext <4 x i1> [[TMP13]] to <4 x i64>
@@ -9858,8 +9858,8 @@ define <8 x float> @test_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x flo
; CHECK-LABEL: define <8 x float> @test_vfmadd256_ps(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP4]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -9876,9 +9876,9 @@ define <8 x float> @test_mask_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8
; CHECK-LABEL: define <8 x float> @test_mask_vfmadd256_ps(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -9906,8 +9906,8 @@ define <4 x float> @test_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x flo
; CHECK-LABEL: define <4 x float> @test_vfmadd128_ps(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP4]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -9924,9 +9924,9 @@ define <4 x float> @test_mask_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -9957,8 +9957,8 @@ define <4 x double> @test_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 x dou
; CHECK-LABEL: define <4 x double> @test_fmadd256_pd(
; CHECK-SAME: <4 x double> [[A:%.*]], <4 x double> [[B:%.*]], <4 x double> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP4]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]]
@@ -9975,9 +9975,9 @@ define <4 x double> @test_mask_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4
; CHECK-LABEL: define <4 x double> @test_mask_fmadd256_pd(
; CHECK-SAME: <4 x double> [[A:%.*]], <4 x double> [[B:%.*]], <4 x double> [[C:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]]
@@ -10008,8 +10008,8 @@ define <2 x double> @test_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-LABEL: define <2 x double> @test_fmadd128_pd(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP4]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]]
@@ -10026,9 +10026,9 @@ define <2 x double> @test_mask_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2
; CHECK-LABEL: define <2 x double> @test_mask_fmadd128_pd(
; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]]
@@ -10060,9 +10060,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmadd_pd_128(<2 x double> %x0, <2
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfmadd_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]]
@@ -10094,9 +10094,9 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmadd_pd_128(<2 x double> %x0, <2
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_maskz_vfmadd_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]]
@@ -10127,9 +10127,9 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmadd_pd_256(<4 x double> %x0, <4
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfmadd_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]]
@@ -10161,9 +10161,9 @@ define <4 x double>@test_int_x86_avx512_maskz_vfmadd_pd_256(<4 x double> %x0, <4
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_maskz_vfmadd_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]]
@@ -10194,9 +10194,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ps_128(<4 x float> %x0, <4 x
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -10228,9 +10228,9 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ps_128(<4 x float> %x0, <4 x
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_maskz_vfmadd_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -10261,9 +10261,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmadd_ps_256(<8 x float> %x0, <8 x
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfmadd_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -10292,9 +10292,9 @@ define <8 x float>@test_int_x86_avx512_maskz_vfmadd_ps_256(<8 x float> %x0, <8 x
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_maskz_vfmadd_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -10321,10 +10321,10 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmsub_pd_128(<2 x double> %x0, <2
;
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfmsub_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[X2]]
@@ -10358,10 +10358,10 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmsub_pd_256(<4 x double> %x0, <4
;
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfmsub_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[X2]]
@@ -10395,10 +10395,10 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ps_128(<4 x float> %x0, <4 x
;
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[X2]]
@@ -10432,10 +10432,10 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsub_ps_256(<8 x float> %x0, <8 x
;
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfmsub_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[X2]]
@@ -10465,9 +10465,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsub_ps_256(<8 x float> %x0, <8 x
define <8 x float> @test_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: define <8 x float> @test_vfnmadd256_ps(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP4]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[A1]]
@@ -10486,10 +10486,10 @@ define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8
;
; CHECK-LABEL: define <8 x float> @test_mask_vfnmadd256_ps(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[A1]]
@@ -10519,9 +10519,9 @@ define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8
define <4 x float> @test_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: define <4 x float> @test_vfnmadd128_ps(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP4]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[A1]]
@@ -10540,10 +10540,10 @@ define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4
;
; CHECK-LABEL: define <4 x float> @test_mask_vfnmadd128_ps(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[A1]]
@@ -10576,9 +10576,9 @@ define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4
define <4 x double> @test_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: define <4 x double> @test_vfnmadd256_pd(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP4]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[A1]]
@@ -10597,10 +10597,10 @@ define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1,
;
; CHECK-LABEL: define <4 x double> @test_mask_vfnmadd256_pd(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[A1]]
@@ -10633,9 +10633,9 @@ define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1,
define <2 x double> @test_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: define <2 x double> @test_vfnmadd128_pd(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP4]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[A1]]
@@ -10654,10 +10654,10 @@ define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1,
;
; CHECK-LABEL: define <2 x double> @test_mask_vfnmadd128_pd(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[A1]]
@@ -10690,8 +10690,8 @@ define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1,
define <8 x float> @test_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: define <8 x float> @test_vfnmsub256_ps(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP4]]
@@ -10714,10 +10714,10 @@ define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8
;
; CHECK-LABEL: define <8 x float> @test_mask_vfnmsub256_ps(
; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[A1]]
@@ -10750,8 +10750,8 @@ define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8
define <4 x float> @test_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: define <4 x float> @test_vfnmsub128_ps(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP4]]
@@ -10774,10 +10774,10 @@ define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4
;
; CHECK-LABEL: define <4 x float> @test_mask_vfnmsub128_ps(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[A1]]
@@ -10813,8 +10813,8 @@ define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4
define <4 x double> @test_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: define <4 x double> @test_vfnmsub256_pd(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP4]]
@@ -10837,10 +10837,10 @@ define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1,
;
; CHECK-LABEL: define <4 x double> @test_mask_vfnmsub256_pd(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[A1]]
@@ -10876,8 +10876,8 @@ define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1,
define <2 x double> @test_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: define <2 x double> @test_vfnmsub128_pd(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP4]]
@@ -10900,10 +10900,10 @@ define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1,
;
; CHECK-LABEL: define <2 x double> @test_mask_vfnmsub128_pd(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[A1]]
@@ -10941,9 +10941,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_128(<2 x double> %x0, <
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfnmsub_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[X0]]
@@ -10981,9 +10981,9 @@ define <4 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_256(<4 x double> %x0, <
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfnmsub_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[X0]]
@@ -11021,9 +11021,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_128(<4 x float> %x0, <4
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfnmsub_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[X0]]
@@ -11061,9 +11061,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_256(<8 x float> %x0, <8
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfnmsub_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP5]]
; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[X0]]
@@ -11097,8 +11097,8 @@ define <8 x float> @test_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 x flo
; CHECK-LABEL: define <8 x float> @test_fmaddsub256_ps(
; CHECK-SAME: <8 x float> [[A:%.*]], <8 x float> [[B:%.*]], <8 x float> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP7]]
@@ -11125,9 +11125,9 @@ define <8 x float> @test_mask_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8
; CHECK-LABEL: define <8 x float> @test_mask_fmaddsub256_ps(
; CHECK-SAME: <8 x float> [[A:%.*]], <8 x float> [[B:%.*]], <8 x float> [[C:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP13]]
@@ -11165,8 +11165,8 @@ define <4 x float> @test_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 x flo
; CHECK-LABEL: define <4 x float> @test_fmaddsub128_ps(
; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP7]]
@@ -11193,9 +11193,9 @@ define <4 x float> @test_mask_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4
; CHECK-LABEL: define <4 x float> @test_mask_fmaddsub128_ps(
; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP13]]
@@ -11236,8 +11236,8 @@ define <4 x double> @test_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a1, <4
; CHECK-LABEL: define <4 x double> @test_vfmaddsub256_pd(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP7]]
@@ -11264,9 +11264,9 @@ define <4 x double> @test_mask_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a
; CHECK-LABEL: define <4 x double> @test_mask_vfmaddsub256_pd(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP13]]
@@ -11307,8 +11307,8 @@ define <2 x double> @test_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a1, <2
; CHECK-LABEL: define <2 x double> @test_vfmaddsub128_pd(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP7]]
@@ -11335,9 +11335,9 @@ define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a
; CHECK-LABEL: define <2 x double> @test_mask_vfmaddsub128_pd(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP13]]
@@ -11379,9 +11379,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_128(<2 x double> %x0,
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfmaddsub_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP13]]
@@ -11423,9 +11423,9 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_128(<2 x double> %x0,
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_maskz_vfmaddsub_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP6]], [[TMP12]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP13]]
@@ -11466,9 +11466,9 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_256(<4 x double> %x0,
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfmaddsub_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP13]]
@@ -11510,9 +11510,9 @@ define <4 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_256(<4 x double> %x0,
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_maskz_vfmaddsub_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP6]], [[TMP12]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP13]]
@@ -11553,9 +11553,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_128(<4 x float> %x0, <
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfmaddsub_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP13]]
@@ -11597,9 +11597,9 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_128(<4 x float> %x0, <
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_maskz_vfmaddsub_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP6]], [[TMP12]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP13]]
@@ -11640,9 +11640,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_256(<8 x float> %x0, <
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfmaddsub_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP13]]
@@ -11681,9 +11681,9 @@ define <8 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_256(<8 x float> %x0, <
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_maskz_vfmaddsub_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP6]], [[TMP12]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP13]]
@@ -11721,9 +11721,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_128(<2 x double> %x0,
; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfmsubadd_pd_128(
; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP13]]
@@ -11765,9 +11765,9 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_256(<4 x double> %x0,
; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfmsubadd_pd_256(
; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP13]]
@@ -11809,9 +11809,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_128(<4 x float> %x0, <
; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfmsubadd_ps_128(
; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP13]]
@@ -11853,9 +11853,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_256(<8 x float> %x0, <
; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfmsubadd_ps_256(
; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP13]]
@@ -11893,10 +11893,10 @@ define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1,
;
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmk(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -11939,10 +11939,10 @@ define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1
;
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmka(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -11985,9 +11985,9 @@ define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1
;
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmkz(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -12015,9 +12015,9 @@ define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a
;
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmkza(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -12045,10 +12045,10 @@ define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1,
;
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmb(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -12103,10 +12103,10 @@ define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1
;
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmba(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -12161,9 +12161,9 @@ define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1
;
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmbz(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -12203,9 +12203,9 @@ define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a
;
; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmbza(
; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -12245,10 +12245,10 @@ define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %
;
; CHECK-LABEL: define <2 x double> @test_mask_vfmadd128_pd_rmk(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -12291,9 +12291,9 @@ define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double>
;
; CHECK-LABEL: define <2 x double> @test_mask_vfmadd128_pd_rmkz(
; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -12321,10 +12321,10 @@ define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %
;
; CHECK-LABEL: define <4 x double> @test_mask_vfmadd256_pd_rmk(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -12367,9 +12367,9 @@ define <4 x double> @test_mask_vfmadd256_pd_rmkz(<4 x double> %a0, <4 x double>
;
; CHECK-LABEL: define <4 x double> @test_mask_vfmadd256_pd_rmkz(
; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
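The hunks above and below all apply the same mechanical rewrite to the CHECK lines: the address of an argument's shadow slot in @__msan_param_tls, previously spelled as an inttoptr/ptrtoint constant expression, is now spelled as a byte-wise getelementptr constant expression at the same offset. A minimal standalone sketch of the equivalence follows; the [100 x i64] global type and the function name are assumptions for illustration only, not taken from the tests in this diff.

; Hypothetical example: both loads read the same bytes at offset 32
; into the MemorySanitizer parameter-shadow TLS array.
@__msan_param_tls = external thread_local global [100 x i64] ; assumed type, for illustration

define <4 x i64> @load_shadow_at_offset_32() {
  ; Old spelling: byte offset 32 computed with integer arithmetic,
  ; round-tripped through ptrtoint/inttoptr.
  %old = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
  ; New spelling: the same +32-byte offset expressed directly as an
  ; i8 getelementptr constant expression.
  %new = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
  ret <4 x i64> %new
}

The getelementptr form denotes the identical address but keeps the computation in pointer arithmetic, which is why every updated CHECK line below changes only the spelling of the operand, never the loaded type, offset, or alignment.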
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll
index 2160961..5e93748 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll
@@ -16,8 +16,8 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpbusd.256(<8 x i32>, <8 x i32>, <8 x
define <8 x i32>@test_int_x86_avx512_vpdpbusd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpbusd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP2]] to <32 x i8>
@@ -49,11 +49,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpbusd_256(<8 x i32> %x0, <8 x i32> %x1,
define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]]
@@ -141,8 +141,8 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpbusd.128(<4 x i32>, <4 x i32>, <4 x
define <4 x i32>@test_int_x86_avx512_vpdpbusd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpbusd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8>
@@ -174,11 +174,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpbusd_128(<4 x i32> %x0, <4 x i32> %x1,
define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -270,8 +270,8 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpbusds.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_vpdpbusds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpbusds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP2]] to <32 x i8>
@@ -303,11 +303,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpbusds_256(<8 x i32> %x0, <8 x i32> %x1,
define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -395,8 +395,8 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpbusds.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_vpdpbusds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpbusds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8>
@@ -428,11 +428,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpbusds_128(<4 x i32> %x0, <4 x i32> %x1,
define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -524,8 +524,8 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpwssd.256(<8 x i32>, <8 x i32>, <8 x
define <8 x i32>@test_int_x86_avx512_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpwssd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16>
@@ -557,11 +557,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1,
define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -649,8 +649,8 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x
define <4 x i32>@test_int_x86_avx512_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpwssd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16>
@@ -682,11 +682,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1,
define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -779,8 +779,8 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpwssds.256(<8 x i32>, <8 x i32>, <8
define <8 x i32>@test_int_x86_avx512_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpwssds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16>
@@ -812,11 +812,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1,
define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -904,8 +904,8 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpwssds.128(<4 x i32>, <4 x i32>, <4
define <4 x i32>@test_int_x86_avx512_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpwssds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16>
@@ -937,11 +937,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1,
define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
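
Every hunk in these MemorySanitizer test updates records the same mechanical rewrite: the constant address of an argument's shadow slot in @__msan_param_tls changes from an inttoptr/add/ptrtoint constant expression to the equivalent byte-wise getelementptr, so only the spelling of the address changes, not the address itself. A minimal standalone sketch of the two spellings (the external thread-local declaration and the function name are assumptions for illustration; the old form is written out as instructions so the module verifies on current LLVM):

; verify with: opt -passes=verify -S tls-gep-sketch.ll   (file name assumed)
@__msan_param_tls = external thread_local global [100 x i64]

define <8 x i32> @load_shadow_at_offset_32() {
  ; Old spelling, as explicit instructions: a round-trip through the
  ; integer domain (ptrtoint + add + inttoptr).
  %base = ptrtoint ptr @__msan_param_tls to i64
  %addr = add i64 %base, 32
  %p.old = inttoptr i64 %addr to ptr
  %s.old = load <8 x i32>, ptr %p.old, align 8
  ; New spelling: the same byte offset as an i8 getelementptr constant
  ; expression, staying in the pointer domain throughout.
  %s.new = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
  ; Both loads read the same 32 bytes, so the xor is all zeros.
  %diff = xor <8 x i32> %s.old, %s.new
  ret <8 x i32> %diff
}
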
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll
index 26b1306..1d30468 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll
@@ -15,8 +15,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <8 x i32>@test_int_x86_avx512_vpdpbusd_256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpbusd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]]) #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP24]], zeroinitializer
@@ -44,11 +44,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpbusd_256(<8 x i32> %x0, <32 x i8> %x1,
define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusd_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP33:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP33:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP40:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP40:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]]
@@ -131,8 +131,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32>, <16 x i8>, <16 x i8>)
define <4 x i32>@test_int_x86_avx512_vpdpbusd_128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpbusd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP24]], zeroinitializer
@@ -160,11 +160,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpbusd_128(<4 x i32> %x0, <16 x i8> %x1,
define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusd_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP33:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP33:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP40:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP40:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -253,8 +253,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32>, <32 x i8>, <32 x i8>
define <8 x i32>@test_int_x86_avx512_vpdpbusds_256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpbusds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP24]], zeroinitializer
@@ -282,11 +282,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpbusds_256(<8 x i32> %x0, <32 x i8> %x1,
define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP33:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP33:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP40:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP40:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -369,8 +369,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32>, <16 x i8>, <16 x i8>
define <4 x i32>@test_int_x86_avx512_vpdpbusds_128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpbusds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP24]], zeroinitializer
@@ -398,11 +398,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpbusds_128(<4 x i32> %x0, <16 x i8> %x1,
define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP33:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP33:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP40:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP40:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -491,8 +491,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
define <8 x i32>@test_int_x86_avx512_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpwssd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16>
@@ -524,11 +524,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1,
define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -619,8 +619,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
define <4 x i32>@test_int_x86_avx512_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpwssd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16>
@@ -652,11 +652,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1,
define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -753,8 +753,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32>, <8 x i32>, <8 x i32>
define <8 x i32>@test_int_x86_avx512_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpwssds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16>
@@ -786,11 +786,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1,
define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -881,8 +881,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32>, <4 x i32>, <4 x i32>
define <4 x i32>@test_int_x86_avx512_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpwssds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -926,11 +926,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1,
define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll
index f6410c6..5c99f8a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll
@@ -16,8 +16,8 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpbusd.512(<16 x i32>, <16 x i32>, <
define <16 x i32>@test_int_x86_avx512_vpdpbusd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpbusd_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[TMP2]] to <64 x i8>
@@ -49,11 +49,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpbusd_512(<16 x i32> %x0, <16 x i32> %x
define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusd_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory {
; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusd_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]]
@@ -141,8 +141,8 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpbusds.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_vpdpbusds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpbusds_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[TMP2]] to <64 x i8>
@@ -174,11 +174,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpbusds_512(<16 x i32> %x0, <16 x i32> %
define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusds_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory {
; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusds_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -266,8 +266,8 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpwssd.512(<16 x i32>, <16 x i32>, <
define <16 x i32>@test_int_x86_avx512_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpwssd_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i32> [[X1]] to <32 x i16>
@@ -299,11 +299,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x
define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory {
; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssd_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -391,8 +391,8 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpwssds.512(<16 x i32>, <16 x i32>,
define <16 x i32>@test_int_x86_avx512_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpwssds_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i32> [[X1]] to <32 x i16>
@@ -424,11 +424,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %
define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory {
; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssds_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll
index 6d4ce6d..236ff45 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll
@@ -15,8 +15,8 @@ declare <16 x i32> @llvm.x86.avx512.vpdpbusd.512(<16 x i32>, <64 x i8>, <64 x i8
define <16 x i32> @test_int_x86_avx512_ask_vpdpbusd_512(<16 x i32> %x0, <64 x i8> %x1, <64 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_ask_vpdpbusd_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i8> [[X2:%.*]]) #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[TMP24]], zeroinitializer
@@ -44,11 +44,11 @@ define <16 x i32> @test_int_x86_avx512_ask_vpdpbusd_512(<16 x i32> %x0, <64 x i8
define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusd_512(<16 x i32> %x0, <64 x i8> %x1, ptr %x2p, <64 x i8> %x4, i16 %x3) sanitize_memory {
; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusd_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <64 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <64 x i8> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP33:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP33:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP40:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP40:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]]
@@ -131,8 +131,8 @@ declare <16 x i32> @llvm.x86.avx512.vpdpbusds.512(<16 x i32>, <64 x i8>, <64 x i
define <16 x i32>@test_int_x86_avx512_vpdpbusds_512(<16 x i32> %x0, <64 x i8> %x1, <64 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpbusds_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i8> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[TMP24]], zeroinitializer
@@ -160,11 +160,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpbusds_512(<16 x i32> %x0, <64 x i8> %x
define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusds_512(<16 x i32> %x0, <64 x i8> %x1, ptr %x2p, <64 x i8> %x4, i16 %x3) sanitize_memory {
; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusds_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <64 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <64 x i8> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP33:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP33:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP40:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP40:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -247,8 +247,8 @@ declare <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32>, <16 x i32>, <16 x i
define <16 x i32>@test_int_x86_avx512_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpwssd_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i32> [[X1]] to <32 x i16>
@@ -280,11 +280,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x
define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory {
; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssd_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
@@ -375,8 +375,8 @@ declare <16 x i32> @llvm.x86.avx512.vpdpwssds.512(<16 x i32>, <16 x i32>, <16 x
define <16 x i32>@test_int_x86_avx512_ask_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_ask_vpdpwssds_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i32> [[X1]] to <32 x i16>
@@ -408,11 +408,11 @@ define <16 x i32>@test_int_x86_avx512_ask_vpdpwssds_512(<16 x i32> %x0, <16 x i3
define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory {
; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssds_512(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
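
The byte offsets in the rewritten CHECK lines are consistent with each argument's shadow occupying the next 8-byte-aligned slot of @__msan_param_tls. For the masked 512-bit variants above, whose signature is (<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3), that layout works out to:

  %x0   <16 x i32>   offset   0  (64 bytes)
  %x1   <16 x i32>   offset  64  (64 bytes)
  %x2p  ptr          offset 128  ( 8 bytes)
  %x4   <16 x i32>   offset 136  (64 bytes)
  %x3   i16          offset 200

which matches the five loads at offsets 0, 64, 128, 136, and 200 in the hunks above; the 128-bit variants follow the same arithmetic with 16-byte vector slots (0, 16, 32, 40, 56).
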
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll
index 1de2a54..0344fbd 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll
@@ -15,8 +15,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <8 x i32>@test_int_x86_avx_vpdpbusd_256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx_vpdpbusd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]]) #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer
@@ -46,8 +46,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32>, <16 x i8>, <16 x i8>)
define <4 x i32>@test_int_x86_avx_vpdpbusd_128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx_vpdpbusd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer
@@ -77,8 +77,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32>, <32 x i8>, <32 x i8>
define <8 x i32>@test_int_x86_avx_vpdpbusds_256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx_vpdpbusds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer
@@ -108,8 +108,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32>, <16 x i8>, <16 x i8>
define <4 x i32>@test_int_x86_avx_vpdpbusds_128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx_vpdpbusds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer
@@ -139,8 +139,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
define <8 x i32>@test_int_x86_avx_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx_vpdpwssd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16>
@@ -174,8 +174,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
define <4 x i32>@test_int_x86_avx_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx_vpdpwssd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16>
@@ -209,8 +209,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32>, <8 x i32>, <8 x i32>
define <8 x i32>@test_int_x86_avx_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx_vpdpwssds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16>
@@ -244,8 +244,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32>, <4 x i32>, <4 x i32>
define <4 x i32>@test_int_x86_avx_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx_vpdpwssds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16>
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll
index 66cbebe..707b46b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll
@@ -26,8 +26,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwsud_128(<4 x i32> %A, <4 x i32> %B, <4
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwsud_128(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -44,8 +44,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwsud_256(<8 x i32> %A, <8 x i32> %B, <8
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwsud_256(
; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -62,8 +62,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwsuds_128(<4 x i32> %A, <4 x i32> %B, <4
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwsuds_128(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -80,8 +80,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwsuds_256(<8 x i32> %A, <8 x i32> %B, <8
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwsuds_256(
; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -98,8 +98,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwusd_128(<4 x i32> %A, <4 x i32> %B, <4
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwusd_128(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -116,8 +116,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwusd_256(<8 x i32> %A, <8 x i32> %B, <8
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwusd_256(
; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -134,8 +134,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwusds_128(<4 x i32> %A, <4 x i32> %B, <4
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwusds_128(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -152,8 +152,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwusds_256(<8 x i32> %A, <8 x i32> %B, <8
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwusds_256(
; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -170,8 +170,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwuud_128(<4 x i32> %A, <4 x i32> %B, <4
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwuud_128(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -188,8 +188,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwuud_256(<8 x i32> %A, <8 x i32> %B, <8
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwuud_256(
; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
@@ -206,8 +206,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwuuds_128(<4 x i32> %A, <4 x i32> %B, <4
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwuuds_128(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
@@ -224,8 +224,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwuuds_256(<8 x i32> %A, <8 x i32> %B, <8
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwuuds_256(
; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll
index d91abea..4a70507 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll
@@ -15,10 +15,10 @@ declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>)
define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbssd_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP32:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP32:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1:![0-9]+]]
@@ -78,10 +78,10 @@ declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>)
define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbssds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP32:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP32:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -141,10 +141,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbssd_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP32:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP32:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -204,10 +204,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbssds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP32:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP32:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -267,10 +267,10 @@ declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>)
define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbsud_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -330,10 +330,10 @@ declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbsuds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -393,10 +393,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbsud_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -456,10 +456,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbsuds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -519,10 +519,10 @@ declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>)
define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbuud_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -582,10 +582,10 @@ declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbuuds_128(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -645,10 +645,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbuud_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -708,10 +708,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbuuds_256(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll
index e663a7b..cd2ccaf 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll
@@ -47,7 +47,7 @@ define void @test_x86_vcvtps2ph_256_m(ptr nocapture %d, <8 x float> %a) nounwind
; CHECK-LABEL: define void @test_x86_vcvtps2ph_256_m(
; CHECK-SAME: ptr captures(none) [[D:%.*]], <8 x float> [[A:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP17:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP17:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <8 x i32> [[TMP17]], zeroinitializer
@@ -76,7 +76,7 @@ define void @test_x86_vcvtps2ph_128_m(ptr nocapture %d, <4 x float> %a) nounwind
; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m(
; CHECK-SAME: ptr captures(none) [[D:%.*]], <4 x float> [[A:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <4 x i32> [[TMP9]], zeroinitializer
@@ -109,7 +109,7 @@ define void @test_x86_vcvtps2ph_128_m2(ptr nocapture %hf4x16, <4 x float> %f4X86
; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m2(
; CHECK-SAME: ptr captures(none) [[HF4X16:%.*]], <4 x float> [[F4X86:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[TMP0]], zeroinitializer
@@ -145,7 +145,7 @@ define void @test_x86_vcvtps2ph_128_m3(ptr nocapture %hf4x16, <4 x float> %f4X86
; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m3(
; CHECK-SAME: ptr captures(none) [[HF4X16:%.*]], <4 x float> [[F4X86:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[TMP0]], zeroinitializer
@@ -178,3 +178,6 @@ entry:
}
attributes #0 = { sanitize_memory }
+;.
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll
index 3d98f60..d62fd7e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll
@@ -22,7 +22,7 @@ define i64 @test1(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test1(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -68,7 +68,7 @@ define i64 @test88(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test88(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -108,7 +108,7 @@ define i64 @test87(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test87(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -148,7 +148,7 @@ define i64 @test86(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test86(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -188,7 +188,7 @@ define i64 @test85(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test85(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -228,7 +228,7 @@ define i64 @test84(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test84(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -268,7 +268,7 @@ define i64 @test83(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test83(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -308,7 +308,7 @@ define i64 @test82(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test82(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -348,7 +348,7 @@ define i64 @test81(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test81(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -388,7 +388,7 @@ define i64 @test80(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test80(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -428,7 +428,7 @@ define i64 @test79(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test79(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -468,7 +468,7 @@ define i64 @test78(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test78(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -508,7 +508,7 @@ define i64 @test77(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test77(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -548,7 +548,7 @@ define i64 @test76(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test76(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <4 x i16>
@@ -596,7 +596,7 @@ define i64 @test75(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test75(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <2 x i32>
@@ -644,7 +644,7 @@ define i64 @test74(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test74(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <4 x i16>
@@ -1049,7 +1049,7 @@ define i64 @test65(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -1094,7 +1094,7 @@ define i64 @test64(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -1139,7 +1139,7 @@ define i64 @test63(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -1178,7 +1178,7 @@ define i64 @test62(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -1223,7 +1223,7 @@ define i64 @test61(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -1268,7 +1268,7 @@ define i64 @test60(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -1307,7 +1307,7 @@ define i64 @test59(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -1352,7 +1352,7 @@ define i64 @test58(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -1396,7 +1396,7 @@ define i64 @test56(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test56(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -1436,7 +1436,7 @@ define i64 @test55(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test55(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -1476,7 +1476,7 @@ define i64 @test54(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test54(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -1516,7 +1516,7 @@ define i64 @test53(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test53(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -1556,7 +1556,7 @@ define i64 @test52(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test52(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -1594,7 +1594,7 @@ define i64 @test51(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test51(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -1634,7 +1634,7 @@ define i64 @test50(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test50(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -1674,7 +1674,7 @@ define i64 @test49(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test49(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP16:%.*]] = bitcast <1 x i64> [[TMP13]] to <4 x i16>
@@ -1732,7 +1732,7 @@ define i64 @test48(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test48(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -1772,7 +1772,7 @@ define i64 @test47(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test47(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -1812,7 +1812,7 @@ define i64 @test46(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test46(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -1852,7 +1852,7 @@ define i64 @test45(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test45(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -1891,7 +1891,7 @@ define i64 @test44(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -1926,7 +1926,7 @@ define i64 @test43(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test43(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -1966,7 +1966,7 @@ define i64 @test42(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test42(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -2006,7 +2006,7 @@ define i64 @test41(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test41(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -2046,7 +2046,7 @@ define i64 @test40(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test40(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -2086,7 +2086,7 @@ define i64 @test39(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test39(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -2126,7 +2126,7 @@ define i64 @test38(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test38(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -2166,7 +2166,7 @@ define i64 @test37(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test37(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -2207,7 +2207,7 @@ define i64 @test36(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -2240,7 +2240,7 @@ define i64 @test35(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test35(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
@@ -2280,7 +2280,7 @@ define i64 @test34(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test34(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -2320,7 +2320,7 @@ define i64 @test33(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test33(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -2360,7 +2360,7 @@ define i64 @test32(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test32(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8>
@@ -2399,7 +2399,7 @@ define i64 @test31(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test31(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -2439,7 +2439,7 @@ define i64 @test30(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test30(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -2479,7 +2479,7 @@ define i64 @test29(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test29(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -2519,7 +2519,7 @@ define i64 @test28(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test28(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -2559,7 +2559,7 @@ define i64 @test27(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test27(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -2599,7 +2599,7 @@ define i64 @test26(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test26(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
@@ -2639,7 +2639,7 @@ define void @test25(ptr %p, <1 x i64> %a) nounwind optsize ssp #0 {
; CHECK-LABEL: define void @test25(
; CHECK-SAME: ptr [[P:%.*]], <1 x i64> [[A:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0
@@ -2702,9 +2702,9 @@ define void @test23(<1 x i64> %d, <1 x i64> %n, ptr %p) nounwind optsize ssp #0
; CHECK-LABEL: define void @test23(
; CHECK-SAME: <1 x i64> [[D:%.*]], <1 x i64> [[N:%.*]], ptr [[P:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP4]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[N]] to <8 x i8>
@@ -2744,7 +2744,7 @@ define i64 @test22(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test22(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
@@ -2850,7 +2850,7 @@ define i64 @test20(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test20(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <1 x i64> [[TMP5]] to <2 x i32>
@@ -2975,7 +2975,7 @@ define i64 @test16(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP6]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -3112,7 +3112,7 @@ define i64 @test12(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test12(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
@@ -3152,7 +3152,7 @@ define i64 @test11(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test11(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -3192,7 +3192,7 @@ define i64 @test10(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test10(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8>
@@ -3232,7 +3232,7 @@ define i64 @test9(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test9(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8>
@@ -3273,7 +3273,7 @@ define i64 @test8(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test8(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -3313,7 +3313,7 @@ define i64 @test7(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test7(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP17:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8>
@@ -3371,7 +3371,7 @@ define i64 @test6(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test6(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -3417,7 +3417,7 @@ define i64 @test5(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test5(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
@@ -3463,7 +3463,7 @@ define i64 @test4(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test4(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -3509,7 +3509,7 @@ define i64 @test3(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test3(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -3555,7 +3555,7 @@ define i64 @test2(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test2(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
@@ -3603,7 +3603,7 @@ define <4 x float> @test89(<4 x float> %a, <1 x i64> %b) nounwind #0 {
; CHECK-LABEL: define <4 x float> @test89(
; CHECK-SAME: <4 x float> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -3647,7 +3647,7 @@ define <1 x i64> @test_mm_insert_pi16(<1 x i64> %a.coerce, i32 %d) nounwind #0 {
; CHECK-SAME: <1 x i64> [[A_COERCE:%.*]], i32 [[D:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll
index 9d7763a..46e8148 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_cmp_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
@@ -25,7 +25,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind
define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_cmp_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -45,7 +45,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comieq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -64,7 +64,7 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comige_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -83,7 +83,7 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comigt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -102,7 +102,7 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comile_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -121,7 +121,7 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comilt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -140,7 +140,7 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comineq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -162,9 +162,9 @@ define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) #0 {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
; CHECK: 3:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5:[0-9]+]]
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.cvtss2si(<4 x float> [[A0:%.*]])
@@ -183,9 +183,9 @@ define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) #0 {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[A0:%.*]])
@@ -209,9 +209,9 @@ define void @test_x86_sse_ldmxcsr(ptr %a0) #0 {
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i32 [[_LDMXCSR]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
; CHECK: 5:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 6:
; CHECK-NEXT: call void @llvm.x86.sse.ldmxcsr(ptr [[A0]])
@@ -227,7 +227,7 @@ declare void @llvm.x86.sse.ldmxcsr(ptr) nounwind
define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_max_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
@@ -243,7 +243,7 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_max_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -260,7 +260,7 @@ declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_min_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
@@ -276,7 +276,7 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_min_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -296,9 +296,9 @@ define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) #0 {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
; CHECK: 3:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 4:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> [[A0:%.*]])
@@ -377,9 +377,9 @@ define void @test_x86_sse_stmxcsr(ptr %a0) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: store i32 0, ptr [[TMP4]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
; CHECK: 5:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 6:
; CHECK-NEXT: call void @llvm.x86.sse.stmxcsr(ptr [[A0]])
@@ -394,7 +394,7 @@ declare void @llvm.x86.sse.stmxcsr(ptr) nounwind
define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomieq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -413,7 +413,7 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomige_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -432,7 +432,7 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomigt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -451,7 +451,7 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomile_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -470,7 +470,7 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomilt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -489,7 +489,7 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomineq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll
index 7048050..fc7b01b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll
@@ -15,7 +15,7 @@ target triple = "x86_64-unknown-linux-gnu"
define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_cmp_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -33,7 +33,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounw
define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_cmp_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -53,7 +53,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw
define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comieq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -72,7 +72,7 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comige_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -91,7 +91,7 @@ declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comigt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -110,7 +110,7 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comile_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -129,7 +129,7 @@ declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comilt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -148,7 +148,7 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comineq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -340,7 +340,7 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
@@ -363,7 +363,7 @@ declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind
define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -397,7 +397,7 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 {
define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, ptr %p1) optsize #0 {
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load_optsize(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -542,7 +542,7 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_max_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
@@ -558,7 +558,7 @@ declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_max_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <2 x i32> <i32 2, i32 1>
@@ -575,7 +575,7 @@ declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_min_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
@@ -591,7 +591,7 @@ declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_min_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <2 x i32> <i32 2, i32 1>
@@ -629,7 +629,7 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packssdw_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -662,7 +662,7 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() #0 {
define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packsswb_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16>
@@ -695,7 +695,7 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() #0 {
define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packuswb_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16>
@@ -728,7 +728,7 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() #0 {
define <16 x i8> @test_x86_sse2_pavg_b(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pavg_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> [[A0:%.*]], <16 x i8> [[A1:%.*]])
@@ -744,7 +744,7 @@ declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse2_pavg_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pavg_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
@@ -760,7 +760,7 @@ declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmadd_wd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i16> [[TMP2]], zeroinitializer
@@ -809,7 +809,7 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmulh_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
@@ -825,7 +825,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmulhu_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
@@ -841,7 +841,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnon
define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psad_bw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64>
@@ -861,7 +861,7 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -883,7 +883,7 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -905,7 +905,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -975,7 +975,7 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -997,7 +997,7 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psra_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1051,7 +1051,7 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1073,7 +1073,7 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1095,7 +1095,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1116,7 +1116,7 @@ declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, ptr %p) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_w_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -1198,7 +1198,7 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomieq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1217,7 +1217,7 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomige_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1236,7 +1236,7 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomigt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1255,7 +1255,7 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomile_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1274,7 +1274,7 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomilt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1293,7 +1293,7 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomineq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll
index 1fcab72..618dde9 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll
@@ -6,8 +6,8 @@ target triple = "x86_64-unknown-linux-gnu"
define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_blendvpd(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x double> [[A2:%.*]] to <2 x i64>
@@ -34,8 +34,8 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_blendvps(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x float> [[A2:%.*]] to <4 x i32>
@@ -63,7 +63,7 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_dppd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = select <2 x i1> <i1 false, i1 true>, <2 x i64> [[TMP3]], <2 x i64> zeroinitializer
@@ -84,7 +84,7 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi
define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_dpps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> [[TMP3]], <4 x i32> zeroinitializer
@@ -105,7 +105,7 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind
define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_insertps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -131,7 +131,7 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw
define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_mpsadbw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -155,7 +155,7 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind rea
define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_mpsadbw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
@@ -190,7 +190,7 @@ define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 {
define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_packusdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -222,8 +222,8 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() #0 {
define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_pblendvb(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ashr <16 x i8> [[A2:%.*]], splat (i8 7)
@@ -262,7 +262,7 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestc(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -281,7 +281,7 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestnzc(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -300,7 +300,7 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestz(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -347,7 +347,7 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_round_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP2]], <2 x i32> <i32 2, i32 1>
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i32 7)
@@ -362,7 +362,7 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n
define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_round_sd_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -389,7 +389,7 @@ define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0
define <4 x float> @test_x86_sse41_round_ss_load(<4 x float> %a0, ptr %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_round_ss_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
index 9a7f4b98..bd96612 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
@@ -26,8 +26,8 @@ entry:
ret i64 %ret
}
-; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+; If the size of __msan_va_arg_tls changes, the second argument of `getelementptr` must also be changed.
; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792)
+; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800)
declare i64 @sum(i64 %n, ...)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
index b61cb6a..bec2ba9 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
@@ -16,15 +16,15 @@ entry:
ret i32 %call
}
-; CHECK: store i32 0, {{.*}} @__msan_param_tls {{.*}} i64 8
-; CHECK: store i32 0, {{.*}} @__msan_param_tls {{.*}} i64 16
-; CHECK: store i32 0, {{.*}} @__msan_param_tls {{.*}} i64 24
-; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls {{.*}} i64 8
-; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls {{.*}} i64 8
-; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls {{.*}} i64 16
-; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls {{.*}} i64 16
-; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls {{.*}} i64 24
-; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls {{.*}} i64 24
+; CHECK: store i32 0, {{.*}} @__msan_param_tls, i64 8
+; CHECK: store i32 0, {{.*}} @__msan_param_tls, i64 16
+; CHECK: store i32 0, {{.*}} @__msan_param_tls, i64 24
+; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls, i64 8
+; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls, i64 8
+; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls, i64 16
+; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls, i64 16
+; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls, i64 24
+; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls, i64 24
define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
entry:
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
index 4bc14da..c549c16 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
@@ -39,9 +39,9 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef signext %arg) sanit
; CHECK-NEXT: [[_MSPROP:%.*]] = sext i8 [[_MSLD]] to i32
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32
; CHECK-NEXT: store i8 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef signext [[TMP7]], i32 noundef 1, i32 noundef [[CONV]])
; CHECK-NEXT: ret void
@@ -80,9 +80,9 @@ define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_mem
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]])
; CHECK-NEXT: ret void
@@ -122,9 +122,9 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m
; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[TMP7]] to double
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]])
; CHECK-NEXT: ret void
@@ -163,9 +163,9 @@ define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]])
; CHECK-NEXT: ret void
@@ -203,9 +203,9 @@ define linkonce_odr dso_local void @_Z4testIeEvT_(x86_fp80 noundef %arg) sanitiz
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i80, ptr [[TMP10]], align 16
; CHECK-NEXT: store i80 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i80 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i80 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (x86_fp80, i32, ...) @_Z5test2IeEvT_iz(x86_fp80 noundef [[TMP7]], i32 noundef 1, x86_fp80 noundef [[TMP7]])
; CHECK-NEXT: ret void
@@ -243,9 +243,9 @@ define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitiz
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]])
; CHECK-NEXT: ret void
@@ -264,7 +264,7 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i
; CHECK-SAME: i64 [[ARG_COERCE0:%.*]], i64 [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_INT64INT64:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
@@ -295,12 +295,12 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i64, i64, i32, ...) @_Z5test2I10Int64Int64EvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
@@ -322,7 +322,7 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc
; CHECK-SAME: double [[ARG_COERCE0:%.*]], double [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEDOUBLE:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
@@ -353,12 +353,12 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, double, i32, ...) @_Z5test2I12DoubleDoubleEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
@@ -390,15 +390,15 @@ define linkonce_odr dso_local void @_Z4testI7Double4EvT_(ptr noundef byval(%stru
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false)
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP8]], i64 32, i1 false)
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP11]], i64 32, i1 false)
; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I7Double4EvT_iz(ptr noundef nonnull byval([[STRUCT_DOUBLE4]]) align 8 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_DOUBLE4]]) align 8 [[ARG]])
; CHECK-NEXT: ret void
@@ -416,7 +416,7 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce
; CHECK-SAME: double [[ARG_COERCE0:%.*]], float [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEFLOAT:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
@@ -447,12 +447,12 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
+; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, float, i32, ...) @_Z5test2I11DoubleFloatEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
@@ -484,15 +484,15 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_(ptr noundef byval(
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false)
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP8]], i64 32, i1 false)
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP11]], i64 32, i1 false)
; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I11LongDouble2EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE2]]) align 16 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE2]]) align 16 [[ARG]])
; CHECK-NEXT: ret void
@@ -518,15 +518,15 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_(ptr noundef byval(
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false)
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 72), ptr align 8 [[TMP8]], i64 64, i1 false)
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP11]], i64 64, i1 false)
; CHECK-NEXT: store i64 64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]])
; CHECK-NEXT: ret void
@@ -561,17 +561,13 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -624,17 +620,13 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -679,17 +671,13 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -734,17 +722,13 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -789,17 +773,13 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -844,17 +824,13 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -899,17 +875,13 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -954,17 +926,13 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -1009,17 +977,13 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -1064,17 +1028,13 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -1119,17 +1079,13 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -1174,17 +1130,13 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
@@ -1222,88 +1174,88 @@ define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false)
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 72), ptr align 8 [[TMP8]], i64 64, i1 false)
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 136), ptr align 8 [[TMP11]], i64 64, i1 false)
; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), ptr align 8 [[TMP14]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 200), ptr align 8 [[TMP14]], i64 64, i1 false)
; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), ptr align 8 [[TMP17]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 264), ptr align 8 [[TMP17]], i64 64, i1 false)
; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], 87960930222080
; CHECK-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), ptr align 8 [[TMP20]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 328), ptr align 8 [[TMP20]], i64 64, i1 false)
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), ptr align 8 [[TMP23]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 392), ptr align 8 [[TMP23]], i64 64, i1 false)
; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], 87960930222080
; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), ptr align 8 [[TMP26]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 456), ptr align 8 [[TMP26]], i64 64, i1 false)
; CHECK-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP28:%.*]] = xor i64 [[TMP27]], 87960930222080
; CHECK-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), ptr align 8 [[TMP29]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 520), ptr align 8 [[TMP29]], i64 64, i1 false)
; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], 87960930222080
; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), ptr align 8 [[TMP32]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 584), ptr align 8 [[TMP32]], i64 64, i1 false)
; CHECK-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP34:%.*]] = xor i64 [[TMP33]], 87960930222080
; CHECK-NEXT: [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), ptr align 8 [[TMP35]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 648), ptr align 8 [[TMP35]], i64 64, i1 false)
; CHECK-NEXT: [[TMP36:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP37:%.*]] = xor i64 [[TMP36]], 87960930222080
; CHECK-NEXT: [[TMP38:%.*]] = inttoptr i64 [[TMP37]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), ptr align 8 [[TMP38]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 712), ptr align 8 [[TMP38]], i64 64, i1 false)
; CHECK-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP40:%.*]] = xor i64 [[TMP39]], 87960930222080
; CHECK-NEXT: [[TMP41:%.*]] = inttoptr i64 [[TMP40]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP41]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP41]], i64 64, i1 false)
; CHECK-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP43:%.*]] = xor i64 [[TMP42]], 87960930222080
; CHECK-NEXT: [[TMP44:%.*]] = inttoptr i64 [[TMP43]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), ptr align 8 [[TMP44]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), ptr align 8 [[TMP44]], i64 64, i1 false)
; CHECK-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP46:%.*]] = xor i64 [[TMP45]], 87960930222080
; CHECK-NEXT: [[TMP47:%.*]] = inttoptr i64 [[TMP46]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), ptr align 8 [[TMP47]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), ptr align 8 [[TMP47]], i64 64, i1 false)
; CHECK-NEXT: [[TMP48:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP49:%.*]] = xor i64 [[TMP48]], 87960930222080
; CHECK-NEXT: [[TMP50:%.*]] = inttoptr i64 [[TMP49]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), ptr align 8 [[TMP50]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), ptr align 8 [[TMP50]], i64 64, i1 false)
; CHECK-NEXT: [[TMP51:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP52:%.*]] = xor i64 [[TMP51]], 87960930222080
; CHECK-NEXT: [[TMP53:%.*]] = inttoptr i64 [[TMP52]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), ptr align 8 [[TMP53]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), ptr align 8 [[TMP53]], i64 64, i1 false)
; CHECK-NEXT: [[TMP54:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP55:%.*]] = xor i64 [[TMP54]], 87960930222080
; CHECK-NEXT: [[TMP56:%.*]] = inttoptr i64 [[TMP55]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), ptr align 8 [[TMP56]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), ptr align 8 [[TMP56]], i64 64, i1 false)
; CHECK-NEXT: [[TMP57:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP58:%.*]] = xor i64 [[TMP57]], 87960930222080
; CHECK-NEXT: [[TMP59:%.*]] = inttoptr i64 [[TMP58]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), ptr align 8 [[TMP59]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), ptr align 8 [[TMP59]], i64 64, i1 false)
; CHECK-NEXT: [[TMP60:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP61:%.*]] = xor i64 [[TMP60]], 87960930222080
; CHECK-NEXT: [[TMP62:%.*]] = inttoptr i64 [[TMP61]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), ptr align 8 [[TMP62]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), ptr align 8 [[TMP62]], i64 64, i1 false)
; CHECK-NEXT: [[TMP63:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP64:%.*]] = xor i64 [[TMP63]], 87960930222080
; CHECK-NEXT: [[TMP65:%.*]] = inttoptr i64 [[TMP64]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), ptr align 8 [[TMP65]], i64 64, i1 false)
-; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), i8 0, i32 48, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), ptr align 8 [[TMP65]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), i8 0, i32 48, i1 false)
; CHECK-NEXT: store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 20, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]])
; CHECK-NEXT: ret void
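
Note on the pattern in the hunks above: every updated CHECK line records the same mechanical change. Pointer offsets that were previously expected as an integer round-trip (ptrtoint the base, add a byte offset, inttoptr back) are now expected as plain i8 getelementptr forms, both for the va_list alloca (%ARGS) and for the shadow TLS globals. A minimal sketch of the new form follows; the declaration of the runtime global is illustrative (its real definition lives in the MSan runtime), and the old form is shown only in a comment:

; Old expected constant (byte offset 8 from the parameter shadow base):
;   inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr)
@__msan_param_tls = external thread_local(initialexec) global [100 x i64]

define i64 @shadow_of_second_param() {
  ; New expected constant: the same byte offset written as an i8 GEP,
  ; which stays in pointer terms and folds without integer casts.
  %s = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
  ret i64 %s
}

The GEP form carries the same address but is easier for the constant folder and pointer analyses to reason about than an opaque inttoptr expression.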
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll
index 429829ef..8a9cf60 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll
@@ -14,7 +14,7 @@ define <2 x i64> @shuffle_vpermv3_v2i64(<2 x i64> %x0, <2 x i64> %x1) #0 {
; CHECK-LABEL: define <2 x i64> @shuffle_vpermv3_v2i64(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP1]], <2 x i64> <i64 2, i64 0>, <2 x i64> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[X0]], <2 x i64> <i64 2, i64 0>, <2 x i64> [[X1]])
@@ -42,9 +42,9 @@ define <2 x i64> @shuffle_vpermv3_v2i64_unary(<2 x i64> %x0) #0 {
define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %m) #0 {
; CHECK-LABEL: define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> [[M]], splat (i64 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <2 x i64> [[TMP4]], zeroinitializer
@@ -74,9 +74,9 @@ define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits(<2 x i64> %x0, <2 x i64> %x
define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits_negative(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %m) #0 {
; CHECK-LABEL: define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits_negative(
; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> [[M]], splat (i64 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <2 x i64> [[TMP4]], zeroinitializer
@@ -107,7 +107,7 @@ define <4 x i64> @shuffle_vpermv3_v4i64(<4 x i64> %x0, <4 x i64> %x1) #0 {
; CHECK-LABEL: define <4 x i64> @shuffle_vpermv3_v4i64(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP1]], <4 x i64> <i64 7, i64 2, i64 6, i64 0>, <4 x i64> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[X0]], <4 x i64> <i64 7, i64 2, i64 6, i64 0>, <4 x i64> [[X1]])
@@ -135,9 +135,9 @@ define <4 x i64> @shuffle_vpermv3_v4i64_unary(<4 x i64> %x0) #0 {
define <4 x i64> @shuffle_vpermv3_v4i64_demandedbits(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %m) #0 {
; CHECK-LABEL: define <4 x i64> @shuffle_vpermv3_v4i64_demandedbits(
; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i64> [[M]], splat (i64 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <4 x i64> [[TMP4]], zeroinitializer
@@ -168,7 +168,7 @@ define <8 x i64> @shuffle_vpermv3_v8i64(<8 x i64> %x0, <8 x i64> %x1) #0 {
; CHECK-LABEL: define <8 x i64> @shuffle_vpermv3_v8i64(
; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> <i64 8, i64 6, i64 10, i64 4, i64 12, i64 2, i64 14, i64 0>, <8 x i64> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[X0]], <8 x i64> <i64 8, i64 6, i64 10, i64 4, i64 12, i64 2, i64 14, i64 0>, <8 x i64> [[X1]])
@@ -196,9 +196,9 @@ define <8 x i64> @shuffle_vpermv3_v8i64_unary(<8 x i64> %x0) #0 {
define <8 x i64> @shuffle_vpermv3_v8i64_demandedbits(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %m) #0 {
; CHECK-LABEL: define <8 x i64> @shuffle_vpermv3_v8i64_demandedbits(
; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i64> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <8 x i64> [[M]], splat (i64 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i64> [[TMP4]], zeroinitializer
@@ -233,7 +233,7 @@ define <4 x i32> @shuffle_vpermv3_v4i32(<4 x i32> %x0, <4 x i32> %x1) #0 {
; CHECK-LABEL: define <4 x i32> @shuffle_vpermv3_v4i32(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP1]], <4 x i32> <i32 7, i32 2, i32 6, i32 0>, <4 x i32> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[X0]], <4 x i32> <i32 7, i32 2, i32 6, i32 0>, <4 x i32> [[X1]])
@@ -261,9 +261,9 @@ define <4 x i32> @shuffle_vpermv3_v4i32_unary(<4 x i32> %x0) #0 {
define <4 x i32> @shuffle_vpermv3_v4i32_demandedbits(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %m) #0 {
; CHECK-LABEL: define <4 x i32> @shuffle_vpermv3_v4i32_demandedbits(
; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[M]], splat (i32 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <4 x i32> [[TMP4]], zeroinitializer
@@ -294,7 +294,7 @@ define <8 x i32> @shuffle_vpermv3_v8i32(<8 x i32> %x0, <8 x i32> %x1) #0 {
; CHECK-LABEL: define <8 x i32> @shuffle_vpermv3_v8i32(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP1]], <8 x i32> <i32 8, i32 6, i32 10, i32 4, i32 12, i32 2, i32 14, i32 0>, <8 x i32> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[X0]], <8 x i32> <i32 8, i32 6, i32 10, i32 4, i32 12, i32 2, i32 14, i32 0>, <8 x i32> [[X1]])
@@ -322,9 +322,9 @@ define <8 x i32> @shuffle_vpermv3_v8i32_unary(<8 x i32> %x0) #0 {
define <8 x i32> @shuffle_vpermv3_v8i32_demandedbits(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %m) #0 {
; CHECK-LABEL: define <8 x i32> @shuffle_vpermv3_v8i32_demandedbits(
; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <8 x i32> [[M]], splat (i32 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i32> [[TMP4]], zeroinitializer
@@ -355,7 +355,7 @@ define <16 x i32> @shuffle_vpermv3_v16i32(<16 x i32> %x0, <16 x i32> %x1) #0 {
; CHECK-LABEL: define <16 x i32> @shuffle_vpermv3_v16i32(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> [[TMP1]], <16 x i32> <i32 16, i32 14, i32 18, i32 12, i32 20, i32 10, i32 22, i32 8, i32 24, i32 6, i32 26, i32 4, i32 28, i32 2, i32 30, i32 0>, <16 x i32> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> [[X0]], <16 x i32> <i32 16, i32 14, i32 18, i32 12, i32 20, i32 10, i32 22, i32 8, i32 24, i32 6, i32 26, i32 4, i32 28, i32 2, i32 30, i32 0>, <16 x i32> [[X1]])
@@ -383,9 +383,9 @@ define <16 x i32> @shuffle_vpermv3_v16i32_unary(<16 x i32> %x0) #0 {
define <16 x i32> @shuffle_vpermv3_v16i32_demandedbits(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %m) #0 {
; CHECK-LABEL: define <16 x i32> @shuffle_vpermv3_v16i32_demandedbits(
; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <16 x i32> [[M]], splat (i32 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[TMP4]], zeroinitializer
@@ -420,7 +420,7 @@ define <8 x i16> @shuffle_vpermv3_v8i16(<8 x i16> %x0, <8 x i16> %x1) #0 {
; CHECK-LABEL: define <8 x i16> @shuffle_vpermv3_v8i16(
; CHECK-SAME: <8 x i16> [[X0:%.*]], <8 x i16> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16> [[TMP1]], <8 x i16> <i16 8, i16 6, i16 10, i16 4, i16 12, i16 2, i16 14, i16 0>, <8 x i16> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16> [[X0]], <8 x i16> <i16 8, i16 6, i16 10, i16 4, i16 12, i16 2, i16 14, i16 0>, <8 x i16> [[X1]])
@@ -448,9 +448,9 @@ define <8 x i16> @shuffle_vpermv3_v8i16_unary(<8 x i16> %x0) #0 {
define <8 x i16> @shuffle_vpermv3_v8i16_demandedbits(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %m) #0 {
; CHECK-LABEL: define <8 x i16> @shuffle_vpermv3_v8i16_demandedbits(
; CHECK-SAME: <8 x i16> [[X0:%.*]], <8 x i16> [[X1:%.*]], <8 x i16> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <8 x i16> [[M]], splat (i16 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i16> [[TMP4]], zeroinitializer
@@ -481,7 +481,7 @@ define <16 x i16> @shuffle_vpermv3_v16i16(<16 x i16> %x0, <16 x i16> %x1) #0 {
; CHECK-LABEL: define <16 x i16> @shuffle_vpermv3_v16i16(
; CHECK-SAME: <16 x i16> [[X0:%.*]], <16 x i16> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16> [[TMP1]], <16 x i16> <i16 16, i16 14, i16 18, i16 12, i16 20, i16 10, i16 22, i16 8, i16 24, i16 6, i16 26, i16 4, i16 28, i16 2, i16 30, i16 0>, <16 x i16> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16> [[X0]], <16 x i16> <i16 16, i16 14, i16 18, i16 12, i16 20, i16 10, i16 22, i16 8, i16 24, i16 6, i16 26, i16 4, i16 28, i16 2, i16 30, i16 0>, <16 x i16> [[X1]])
@@ -509,9 +509,9 @@ define <16 x i16> @shuffle_vpermv3_v16i16_unary(<16 x i16> %x0) #0 {
define <16 x i16> @shuffle_vpermv3_v16i16_demandedbits(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %m) #0 {
; CHECK-LABEL: define <16 x i16> @shuffle_vpermv3_v16i16_demandedbits(
; CHECK-SAME: <16 x i16> [[X0:%.*]], <16 x i16> [[X1:%.*]], <16 x i16> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <16 x i16> [[M]], splat (i16 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i16> [[TMP4]], zeroinitializer
@@ -542,7 +542,7 @@ define <32 x i16> @shuffle_vpermv3_v32i16(<32 x i16> %x0, <32 x i16> %x1) #0 {
; CHECK-LABEL: define <32 x i16> @shuffle_vpermv3_v32i16(
; CHECK-SAME: <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> <i16 33, i16 17, i16 35, i16 19, i16 37, i16 21, i16 39, i16 23, i16 41, i16 25, i16 43, i16 27, i16 45, i16 29, i16 47, i16 31, i16 49, i16 14, i16 51, i16 12, i16 53, i16 10, i16 55, i16 8, i16 57, i16 6, i16 59, i16 4, i16 61, i16 2, i16 63, i16 0>, <32 x i16> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[X0]], <32 x i16> <i16 33, i16 17, i16 35, i16 19, i16 37, i16 21, i16 39, i16 23, i16 41, i16 25, i16 43, i16 27, i16 45, i16 29, i16 47, i16 31, i16 49, i16 14, i16 51, i16 12, i16 53, i16 10, i16 55, i16 8, i16 57, i16 6, i16 59, i16 4, i16 61, i16 2, i16 63, i16 0>, <32 x i16> [[X1]])
@@ -570,9 +570,9 @@ define <32 x i16> @shuffle_vpermv3_v32i16_unary(<32 x i16> %x0) #0 {
define <32 x i16> @shuffle_vpermv3_v32i16_demandedbits(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %m) #0 {
; CHECK-LABEL: define <32 x i16> @shuffle_vpermv3_v32i16_demandedbits(
; CHECK-SAME: <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i16> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <32 x i16> [[M]], splat (i16 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <32 x i16> [[TMP4]], zeroinitializer
@@ -607,7 +607,7 @@ define <16 x i8> @shuffle_vpermv3_v16i8(<16 x i8> %x0, <16 x i8> %x1) #0 {
; CHECK-LABEL: define <16 x i8> @shuffle_vpermv3_v16i8(
; CHECK-SAME: <16 x i8> [[X0:%.*]], <16 x i8> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> [[TMP1]], <16 x i8> <i8 16, i8 14, i8 18, i8 12, i8 20, i8 10, i8 22, i8 8, i8 24, i8 6, i8 26, i8 4, i8 28, i8 2, i8 30, i8 0>, <16 x i8> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> [[X0]], <16 x i8> <i8 16, i8 14, i8 18, i8 12, i8 20, i8 10, i8 22, i8 8, i8 24, i8 6, i8 26, i8 4, i8 28, i8 2, i8 30, i8 0>, <16 x i8> [[X1]])
@@ -635,9 +635,9 @@ define <16 x i8> @shuffle_vpermv3_v16i8_unary(<16 x i8> %x0) #0 {
define <16 x i8> @shuffle_vpermv3_v16i8_demandedbits(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %m) #0 {
; CHECK-LABEL: define <16 x i8> @shuffle_vpermv3_v16i8_demandedbits(
; CHECK-SAME: <16 x i8> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <16 x i8> [[M]], splat (i8 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i8> [[TMP4]], zeroinitializer
@@ -668,7 +668,7 @@ define <32 x i8> @shuffle_vpermv3_v32i8(<32 x i8> %x0, <32 x i8> %x1) #0 {
; CHECK-LABEL: define <32 x i8> @shuffle_vpermv3_v32i8(
; CHECK-SAME: <32 x i8> [[X0:%.*]], <32 x i8> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> [[TMP1]], <32 x i8> <i8 33, i8 17, i8 35, i8 19, i8 37, i8 21, i8 39, i8 23, i8 41, i8 25, i8 43, i8 27, i8 45, i8 29, i8 47, i8 31, i8 49, i8 14, i8 51, i8 12, i8 53, i8 10, i8 55, i8 8, i8 57, i8 6, i8 59, i8 4, i8 61, i8 2, i8 63, i8 0>, <32 x i8> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> [[X0]], <32 x i8> <i8 33, i8 17, i8 35, i8 19, i8 37, i8 21, i8 39, i8 23, i8 41, i8 25, i8 43, i8 27, i8 45, i8 29, i8 47, i8 31, i8 49, i8 14, i8 51, i8 12, i8 53, i8 10, i8 55, i8 8, i8 57, i8 6, i8 59, i8 4, i8 61, i8 2, i8 63, i8 0>, <32 x i8> [[X1]])
@@ -696,9 +696,9 @@ define <32 x i8> @shuffle_vpermv3_v32i8_unary(<32 x i8> %x0) #0 {
define <32 x i8> @shuffle_vpermv3_v32i8_demandedbits(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %m) #0 {
; CHECK-LABEL: define <32 x i8> @shuffle_vpermv3_v32i8_demandedbits(
; CHECK-SAME: <32 x i8> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <32 x i8> [[M]], splat (i8 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <32 x i8> [[TMP4]], zeroinitializer
@@ -729,7 +729,7 @@ define <64 x i8> @shuffle_vpermv3_v64i8(<64 x i8> %x0, <64 x i8> %x1) #0 {
; CHECK-LABEL: define <64 x i8> @shuffle_vpermv3_v64i8(
; CHECK-SAME: <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP1:%.*]] = call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> [[TMP1]], <64 x i8> <i8 -128, i8 127, i8 126, i8 125, i8 124, i8 123, i8 122, i8 121, i8 120, i8 119, i8 118, i8 115, i8 51, i8 50, i8 49, i8 48, i8 47, i8 46, i8 45, i8 44, i8 43, i8 42, i8 41, i8 40, i8 39, i8 38, i8 37, i8 36, i8 35, i8 34, i8 33, i8 32, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <64 x i8> [[TMP2]])
; CHECK-NEXT: [[R:%.*]] = call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> [[X0]], <64 x i8> <i8 -128, i8 127, i8 126, i8 125, i8 124, i8 123, i8 122, i8 121, i8 120, i8 119, i8 118, i8 115, i8 51, i8 50, i8 49, i8 48, i8 47, i8 46, i8 45, i8 44, i8 43, i8 42, i8 41, i8 40, i8 39, i8 38, i8 37, i8 36, i8 35, i8 34, i8 33, i8 32, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <64 x i8> [[X1]])
@@ -757,9 +757,9 @@ define <64 x i8> @shuffle_vpermv3_v64i8_unary(<64 x i8> %x0) #0 {
define <64 x i8> @shuffle_vpermv3_v64i8_demandedbits(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %m) #0 {
; CHECK-LABEL: define <64 x i8> @shuffle_vpermv3_v64i8_demandedbits(
; CHECK-SAME: <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i8> [[M:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = xor <64 x i8> [[M]], splat (i8 -1)
; CHECK-NEXT: [[TMP5:%.*]] = and <64 x i8> [[TMP4]], zeroinitializer
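
Aside from the addressing change, the _MSPROP lines in the vpermi2 hunks above show how the shadow of a constant-index two-source permute is propagated: the identical permutation is applied to the operands' shadow vectors, so each result lane inherits the shadow of the source lane it was taken from. A standalone sketch of that propagation step, using the same 128-bit intrinsic that appears in the tests (the function name here is hypothetical):

declare <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64>, <2 x i64>, <2 x i64>)

define <2 x i64> @shadow_of_permute(<2 x i64> %s0, <2 x i64> %s1) {
  ; %s0 and %s1 stand for the shadows of the two data operands; the
  ; index vector <2, 0> matches the one used on the actual data.
  %r = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> %s0, <2 x i64> <i64 2, i64 0>, <2 x i64> %s1)
  ret <2 x i64> %r
}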
diff --git a/llvm/test/Instrumentation/MemorySanitizer/array_types.ll b/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
index ddebe3e..399c0fe 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
@@ -10,7 +10,7 @@ define [2 x i32] @InsertValue(i32 %x, i32 %y) sanitize_memory {
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[TMP0]], 0
; CHECK-NEXT: [[A:%.*]] = insertvalue [2 x i32] undef, i32 [[X]], 0
@@ -24,8 +24,8 @@ define [2 x i32] @InsertValue(i32 %x, i32 %y) sanitize_memory {
; CHECK-ORIGIN-NEXT: [[ENTRY:.*:]]
; CHECK-ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CHECK-ORIGIN-NEXT: call void @llvm.donothing()
; CHECK-ORIGIN-NEXT: [[TMP4:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[TMP0]], 0
; CHECK-ORIGIN-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP0]], 0
@@ -50,7 +50,7 @@ define [2 x double] @InsertValueDouble(double %x, double %y) sanitize_memory {
; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[TMP0]], 0
; CHECK-NEXT: [[A:%.*]] = insertvalue [2 x double] undef, double [[X]], 0
@@ -64,8 +64,8 @@ define [2 x double] @InsertValueDouble(double %x, double %y) sanitize_memory {
; CHECK-ORIGIN-NEXT: [[ENTRY:.*:]]
; CHECK-ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CHECK-ORIGIN-NEXT: call void @llvm.donothing()
; CHECK-ORIGIN-NEXT: [[TMP4:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[TMP0]], 0
; CHECK-ORIGIN-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP0]], 0
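
The CHECK-ORIGIN lines in the array_types.ll hunks above also illustrate the parallel origin-tracking state: each parameter's shadow slot in @__msan_param_tls has a 4-byte origin slot at the same byte offset in @__msan_param_origin_tls, and both are now addressed with the same getelementptr form. A sketch of the paired loads, with illustrative sizes for the two TLS arrays:

@__msan_param_tls        = external thread_local(initialexec) global [100 x i64]
@__msan_param_origin_tls = external thread_local(initialexec) global [200 x i32]

define void @paired_slots() {
  ; Shadow and origin for the second parameter both live at byte
  ; offset 8 in their respective TLS arrays (align 8 vs. align 4).
  %shadow = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
  %origin = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
  ret void
}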
diff --git a/llvm/test/Instrumentation/MemorySanitizer/bmi.ll b/llvm/test/Instrumentation/MemorySanitizer/bmi.ll
index f0f67fc..46bec29 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/bmi.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/bmi.ll
@@ -19,7 +19,7 @@ define i32 @Test_bzhi_32(i32 %a, i32 %b) sanitize_memory {
; CHECK-LABEL: define i32 @Test_bzhi_32(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0]], 0
@@ -40,7 +40,7 @@ define i64 @Test_bzhi_64(i64 %a, i64 %b) sanitize_memory {
; CHECK-LABEL: define i64 @Test_bzhi_64(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP0]], 0
@@ -62,7 +62,7 @@ define i32 @Test_bextr_32(i32 %a, i32 %b) sanitize_memory {
; CHECK-LABEL: define i32 @Test_bextr_32(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0]], 0
@@ -83,7 +83,7 @@ define i64 @Test_bextr_64(i64 %a, i64 %b) sanitize_memory {
; CHECK-LABEL: define i64 @Test_bextr_64(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP0]], 0
@@ -105,7 +105,7 @@ define i32 @Test_pdep_32(i32 %a, i32 %b) sanitize_memory {
; CHECK-LABEL: define i32 @Test_pdep_32(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0]], 0
@@ -126,7 +126,7 @@ define i64 @Test_pdep_64(i64 %a, i64 %b) sanitize_memory {
; CHECK-LABEL: define i64 @Test_pdep_64(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP0]], 0
@@ -147,7 +147,7 @@ define i32 @Test_pext_32(i32 %a, i32 %b) sanitize_memory {
; CHECK-LABEL: define i32 @Test_pext_32(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0]], 0
@@ -168,7 +168,7 @@ define i64 @Test_pext_64(i64 %a, i64 %b) sanitize_memory {
; CHECK-LABEL: define i64 @Test_pext_64(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP0]], 0
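
The one mechanical change running through all of these tests: the expected address of a parameter's shadow slot is now spelled as a byte-wise getelementptr over i8 instead of an inttoptr/ptrtoint round trip. Both constant expressions denote the same address, @__msan_param_tls plus a byte offset; the GEP spelling is simply what constant folding now emits. A minimal self-contained sketch of the two spellings (the function name is illustrative, not from the patch; the TLS array is declared with the 800-byte size MSan uses):

@__msan_param_tls = external thread_local global [100 x i64]

define i32 @load_second_param_shadow() {
  ; old spelling: integer arithmetic on the TLS base, cast back to a pointer
  ; %s = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
  ; new spelling: equivalent i8 GEP at the same byte offset
  %s = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
  ret i32 %s
}
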
diff --git a/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll b/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll
index e06576e..0acdf71 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
%struct.S = type { i64, i64, i64, [8 x i8] }
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{.*}} add {{.*}} ptrtoint {{.*}} @__msan_param_tls {{.*}} i64 8) {{.*}}, ptr align 8 {{.*}}, i64 32, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), ptr align 8 {{.*}}, i64 32, i1 false)
define void @Caller() sanitize_memory {
entry:
diff --git a/llvm/test/Instrumentation/MemorySanitizer/byval.ll b/llvm/test/Instrumentation/MemorySanitizer/byval.ll
index 69970896..9f6a7cb 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/byval.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/byval.ll
@@ -16,8 +16,8 @@ define i128 @ByValArgument(i32, ptr byval(i128) %p) sanitize_memory {
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 16, i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X:%.*]] = load i128, ptr [[P]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
@@ -66,8 +66,8 @@ define void @ByValForward(i32, ptr byval(i128) %p) sanitize_memory {
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 16, i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @Fn(ptr [[P]])
@@ -107,8 +107,8 @@ define void @ByValForwardByVal(i32, ptr byval(i128) %p) sanitize_memory {
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 16, i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
@@ -165,8 +165,8 @@ define i8 @ByValArgument8(i32, ptr byval(i8) %p) sanitize_memory {
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 1, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 4, i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
@@ -218,8 +218,8 @@ define void @ByValForward8(i32, ptr byval(i8) %p) sanitize_memory {
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 1, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 4, i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @Fn8(ptr [[P]])
@@ -261,8 +261,8 @@ define void @ByValForwardByVal8(i32, ptr byval(i8) %p) sanitize_memory {
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 1, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 4, i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
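
For byval arguments the tests check a memcpy rather than a scalar load: the argument's whole shadow block is copied out of its @__msan_param_tls slot into the shadow of the callee-local copy (and likewise for origins from @__msan_param_origin_tls), so the rewritten constant expression shows up as the memcpy source operand. A hand-written sketch of that shape, assuming a 16-byte byval argument whose slot starts at offset 8; the destination pointer stands in for the computed shadow address:

@__msan_param_tls = external thread_local global [100 x i64]

declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)

define void @copy_byval_shadow(ptr %dst_shadow) {
  ; copy the argument's 16-byte shadow block wholesale, not as a scalar load
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst_shadow, ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 16, i1 false)
  ret void
}
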
diff --git a/llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll b/llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll
index 0696ac9..582d753 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll
@@ -134,7 +134,7 @@ define float @fadd_f32_accum(float %accum, <4 x float> %vec) #0 {
; CHECK-SAME: float [[ACCUM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]]
@@ -152,7 +152,7 @@ define float @fadd_f32_strict(float %param, <4 x float> %vec) #0 {
; CHECK-SAME: float [[PARAM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]])
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP3]], [[TMP1]]
@@ -170,7 +170,7 @@ define float @fadd_f32_strict_accum(float %accum, <4 x float> %vec) #0 {
; CHECK-SAME: float [[ACCUM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]]
@@ -205,7 +205,7 @@ define float @fmul_f32_accum(float %accum, <4 x float> %vec) #0 {
; CHECK-SAME: float [[ACCUM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]]
@@ -223,7 +223,7 @@ define float @fmul_f32_strict(float %param, <4 x float> %vec) #0 {
; CHECK-SAME: float [[PARAM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]])
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP3]], [[TMP1]]
@@ -241,7 +241,7 @@ define float @fmul_f32_strict_accum(float %accum, <4 x float> %vec) #0 {
; CHECK-SAME: float [[ACCUM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]]
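
The strict-FP reduction checks above all share one shadow rule: the vector operand's shadow is collapsed with llvm.vector.reduce.or, then or'ed with the scalar accumulator's shadow, so a single poisoned lane (or a poisoned accumulator) poisons the whole result. Stripped of the TLS plumbing, the propagation the checks are matching looks roughly like this (names are illustrative):

declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)

define i32 @fadd_reduction_shadow(i32 %accum_shadow, <4 x i32> %vec_shadow) {
  ; any set shadow bit in any lane survives the or-reduction
  %collapsed = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %vec_shadow)
  ; fold in the accumulator's shadow
  %res = or i32 %accum_shadow, %collapsed
  ret i32 %res
}
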
diff --git a/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll b/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
index 5ea407b..a96046b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
@@ -7,17 +7,17 @@ target triple = "x86_64-unknown-linux-gnu"
define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %b64, <2 x i64> %b128, <4 x i64> %b256, <8 x i64> %b512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i64(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i64
@@ -51,17 +51,17 @@ define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64
define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %b32, <4 x i32> %b128, <8 x i32> %b256, <16 x i32> %b512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i32(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i32
@@ -95,17 +95,17 @@ define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i3
define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %b16, <8 x i16> %b128, <16 x i16> %b256, <32 x i16> %b512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i16(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i16 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i16
@@ -139,17 +139,17 @@ define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i
define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %b8, <16 x i8> %b128, <32 x i8> %b256, <64 x i8> %b512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i8(
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i8
@@ -183,13 +183,13 @@ define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %
define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i64(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i64
@@ -223,13 +223,13 @@ define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64
define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i32(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i32
@@ -263,13 +263,13 @@ define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i3
define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i16(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i16 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i16
@@ -303,13 +303,13 @@ define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i
define void @var_rotate_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i8(
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i8
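
The byte offsets in these getelementptr expressions are not arbitrary: parameter shadows are laid out in @__msan_param_tls in declaration order, each argument advancing the running offset by its store size rounded up to 8 bytes. Working that out for @var_funnel_i64(i64, <2 x i64>, <4 x i64>, <8 x i64>, i64, ...) reproduces exactly the offsets checked above (a worked layout, assuming that rounding rule):

; sizes: i64 = 8 B, <2 x i64> = 16 B, <4 x i64> = 32 B, <8 x i64> = 64 B
; %a64  -> 0     %a128 -> 8     %a256 -> 24    %a512 -> 56
; %b64  -> 120   %b128 -> 128   %b256 -> 144   %b512 -> 176
; %c64  -> 240   %c128 -> 248   %c256 -> 264   %c512 -> 296
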
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll
index cbc556f..0d94357 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll
@@ -13,7 +13,7 @@ target triple = "i386-unknown-linux-gnu"
define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_addsub_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -30,7 +30,7 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou
define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_addsub_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -46,8 +46,8 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi
define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_blendv_pd_256(
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -75,8 +75,8 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_blendv_ps_256(
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -105,7 +105,7 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_cmp_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -124,7 +124,7 @@ declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) no
define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_cmp_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -141,7 +141,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0
define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_cmp_ps_256_pseudo_op(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP99:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -400,7 +400,7 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>) nounwind readnone
define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_dp_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -427,7 +427,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi
define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_hadd_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -446,7 +446,7 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_hadd_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -465,7 +465,7 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_hsub_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -484,7 +484,7 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw
define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_hsub_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -527,7 +527,7 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr) nounwind readonly
define <2 x double> @test_x86_avx_maskload_pd(ptr %a0, <2 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_pd(
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -554,7 +554,7 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(ptr, <2 x i64>) nounwind readonly
define <4 x double> @test_x86_avx_maskload_pd_256(ptr %a0, <4 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_pd_256(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -581,7 +581,7 @@ declare <4 x double> @llvm.x86.avx.maskload.pd.256(ptr, <4 x i64>) nounwind read
define <4 x float> @test_x86_avx_maskload_ps(ptr %a0, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_ps(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -608,7 +608,7 @@ declare <4 x float> @llvm.x86.avx.maskload.ps(ptr, <4 x i32>) nounwind readonly
define <8 x float> @test_x86_avx_maskload_ps_256(ptr %a0, <8 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_ps_256(
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -635,9 +635,9 @@ declare <8 x float> @llvm.x86.avx.maskload.ps.256(ptr, <8 x i32>) nounwind reado
define void @test_x86_avx_maskstore_pd(ptr %a0, <2 x i64> %mask, <2 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_maskstore_pd(
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -665,9 +665,9 @@ declare void @llvm.x86.avx.maskstore.pd(ptr, <2 x i64>, <2 x double>) nounwind
define void @test_x86_avx_maskstore_pd_256(ptr %a0, <4 x i64> %mask, <4 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_maskstore_pd_256(
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -695,9 +695,9 @@ declare void @llvm.x86.avx.maskstore.pd.256(ptr, <4 x i64>, <4 x double>) nounwi
define void @test_x86_avx_maskstore_ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_maskstore_ps(
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -725,9 +725,9 @@ declare void @llvm.x86.avx.maskstore.ps(ptr, <4 x i32>, <4 x float>) nounwind
define void @test_x86_avx_maskstore_ps_256(ptr %a0, <8 x i32> %mask, <8 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_maskstore_ps_256(
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -756,7 +756,7 @@ declare void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32>, <8 x float>) nounwin
define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_max_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -773,7 +773,7 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_max_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -790,7 +790,7 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind
define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_min_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -807,7 +807,7 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi
define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_min_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -868,7 +868,7 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestc_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -888,7 +888,7 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestnzc_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -908,7 +908,7 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestz_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -987,7 +987,7 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[A1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[A1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i64> [[A1]] to <2 x i1>
@@ -1014,7 +1014,7 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi
define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[A1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[A1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i64> [[A1]] to <4 x i2>
@@ -1056,7 +1056,7 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 {
define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[A1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[A1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[A1]] to <4 x i2>
@@ -1079,7 +1079,7 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #
}
define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1119,7 +1119,7 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind
define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[A1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[A1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i32> [[A1]] to <8 x i3>
@@ -1146,7 +1146,7 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun
define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -1166,7 +1166,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -1186,7 +1186,7 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -1206,7 +1206,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -1226,7 +1226,7 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn
define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -1246,7 +1246,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -1266,7 +1266,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind r
define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -1286,7 +1286,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon
define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -1306,7 +1306,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind rea
define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -1326,7 +1326,7 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
@@ -1346,7 +1346,7 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -1366,7 +1366,7 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -1410,7 +1410,7 @@ declare void @llvm.x86.avx.vzeroupper() nounwind
define void @movnt_dq(ptr %p, <2 x i64> %a1) nounwind #0 {
; CHECK-LABEL: @movnt_dq(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1442,7 +1442,7 @@ define void @movnt_ps(ptr %p, <8 x float> %a) nounwind #0 {
; CHECK-LABEL: @movnt_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
@@ -1465,7 +1465,7 @@ declare void @llvm.x86.avx.movnt.ps.256(ptr, <8 x float>) nounwind
define void @movnt_pd(ptr %p, <4 x double> %a1) nounwind #0 {
; add operation forces the execution domain.
; CHECK-LABEL: @movnt_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1494,7 +1494,7 @@ declare void @llvm.x86.avx.movnt.pd.256(ptr, <4 x double>) nounwind
define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_pclmulqdq(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll
index cd79bcb..6471e09 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll
@@ -7,7 +7,7 @@ target triple = "i386-unknown-linux-gnu"
define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packssdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
@@ -42,7 +42,7 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() #0 {
define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packsswb(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
@@ -77,7 +77,7 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() #0 {
define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packuswb(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
@@ -112,7 +112,7 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() #0 {
define <32 x i8> @test_x86_avx2_pavg_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pavg_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
@@ -129,7 +129,7 @@ declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_pavg_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pavg_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
@@ -146,7 +146,7 @@ declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readno
define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmadd_wd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
@@ -197,7 +197,7 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone
define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmulh_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
@@ -214,7 +214,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmulhu_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
@@ -231,7 +231,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind read
define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psad_bw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
@@ -252,7 +252,7 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -275,7 +275,7 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -298,7 +298,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
@@ -372,7 +372,7 @@ declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone
define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -395,7 +395,7 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psra_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
@@ -452,7 +452,7 @@ declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone
define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -475,7 +475,7 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -498,7 +498,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
@@ -520,7 +520,7 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnon
define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, ptr %p) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_w_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -606,7 +606,7 @@ declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone
define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -625,7 +625,7 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -644,7 +644,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -663,7 +663,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readn
define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -682,7 +682,7 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -701,7 +701,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind read
define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -720,7 +720,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i8> [[TMP1]], zeroinitializer
@@ -748,7 +748,7 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind rea
define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -787,7 +787,7 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) #
define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmul_hr_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
@@ -804,7 +804,7 @@ declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind re
define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pshuf_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> [[TMP1]], <32 x i8> [[A1:%.*]])
@@ -822,7 +822,7 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
@@ -839,7 +839,7 @@ declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -856,7 +856,7 @@ declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
@@ -873,7 +873,7 @@ declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readn
define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_mpsadbw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <32 x i8> [[TMP1]] to i256
@@ -898,7 +898,7 @@ declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind rea
define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_mpsadbw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -934,7 +934,7 @@ define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packusdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
@@ -968,8 +968,8 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() #0 {
define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendvb(
-; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -995,7 +995,7 @@ declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounw
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -1012,7 +1012,7 @@ declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind r
define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendd_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> <i32 4, i32 5, i32 6, i32 3>
@@ -1029,7 +1029,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind
define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1046,7 +1046,7 @@ declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind
define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_permd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
@@ -1063,7 +1063,7 @@ declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_permps(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
@@ -1088,7 +1088,7 @@ declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind reado
define <2 x i64> @test_x86_avx2_maskload_q(ptr %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_q(
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -1114,7 +1114,7 @@ declare <2 x i64> @llvm.x86.avx2.maskload.q(ptr, <2 x i64>) nounwind readonly
define <4 x i64> @test_x86_avx2_maskload_q_256(ptr %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_q_256(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -1140,7 +1140,7 @@ declare <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr, <4 x i64>) nounwind readonl
define <4 x i32> @test_x86_avx2_maskload_d(ptr %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_d(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -1166,7 +1166,7 @@ declare <4 x i32> @llvm.x86.avx2.maskload.d(ptr, <4 x i32>) nounwind readonly
define <8 x i32> @test_x86_avx2_maskload_d_256(ptr %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_d_256(
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -1192,9 +1192,9 @@ declare <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr, <8 x i32>) nounwind readonl
define void @test_x86_avx2_maskstore_q(ptr %a0, <2 x i64> %a1, <2 x i64> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_maskstore_q(
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -1221,9 +1221,9 @@ declare void @llvm.x86.avx2.maskstore.q(ptr, <2 x i64>, <2 x i64>) nounwind
define void @test_x86_avx2_maskstore_q_256(ptr %a0, <4 x i64> %a1, <4 x i64> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_maskstore_q_256(
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -1250,9 +1250,9 @@ declare void @llvm.x86.avx2.maskstore.q.256(ptr, <4 x i64>, <4 x i64>) nounwind
define void @test_x86_avx2_maskstore_d(ptr %a0, <4 x i32> %a1, <4 x i32> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_maskstore_d(
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -1279,9 +1279,9 @@ declare void @llvm.x86.avx2.maskstore.d(ptr, <4 x i32>, <4 x i32>) nounwind
define void @test_x86_avx2_maskstore_d_256(ptr %a0, <8 x i32> %a1, <8 x i32> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_maskstore_d_256(
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
@@ -1309,7 +1309,7 @@ declare void @llvm.x86.avx2.maskstore.d.256(ptr, <8 x i32>, <8 x i32>) nounwind
define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
@@ -1350,7 +1350,7 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
@@ -1391,7 +1391,7 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
@@ -1424,7 +1424,7 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
@@ -1458,7 +1458,7 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
@@ -1499,7 +1499,7 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
@@ -1540,7 +1540,7 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
@@ -1574,7 +1574,7 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
@@ -1609,7 +1609,7 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind read
define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrav_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
@@ -1642,7 +1642,7 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrav_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
@@ -1675,9 +1675,9 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind read
define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, ptr %a1, <4 x i32> %idx, <2 x double> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
@@ -1709,9 +1709,9 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, ptr,
define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, ptr %a1, <4 x i32> %idx, <4 x double> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
@@ -1743,9 +1743,9 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, ptr,
define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, ptr %a1, <2 x i64> %idx, <2 x double> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
@@ -1777,9 +1777,9 @@ declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, ptr,
define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, ptr %a1, <4 x i64> %idx, <4 x double> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
@@ -1811,9 +1811,9 @@ declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, ptr,
define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, ptr %a1, <4 x i32> %idx, <4 x float> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -1845,9 +1845,9 @@ declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, ptr,
define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, ptr %a1, <8 x i32> %idx, <8 x float> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
@@ -1879,9 +1879,9 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr,
define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, ptr %a1, <2 x i64> %idx, <4 x float> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -1913,9 +1913,9 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, ptr,
define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, ptr %a1, <4 x i64> %idx, <4 x float> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -1947,9 +1947,9 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, ptr,
define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, ptr %a1, <4 x i32> %idx, <2 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
@@ -1981,9 +1981,9 @@ declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, ptr,
define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, ptr %a1, <4 x i32> %idx, <4 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
@@ -2015,9 +2015,9 @@ declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, ptr,
define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
@@ -2049,9 +2049,9 @@ declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, ptr,
define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, ptr %a1, <4 x i64> %idx, <4 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
@@ -2083,9 +2083,9 @@ declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, ptr,
define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, ptr %a1, <4 x i32> %idx, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -2117,9 +2117,9 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr,
define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, ptr %a1, <8 x i32> %idx, <8 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_d_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
@@ -2151,9 +2151,9 @@ declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, ptr,
define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, ptr %a1, <2 x i64> %idx, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -2185,9 +2185,9 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, ptr,
define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, ptr %a1, <4 x i64> %idx, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx2_gather_q_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -2219,10 +2219,10 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, ptr,
define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <8 x float> %mask, ptr nocapture %out) #0 {
; CHECK-LABEL: @test_gather_mask(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
@@ -2265,10 +2265,10 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <
define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i1> %mask) #0 {
; CHECK-LABEL: @test_mask_demanded_bits(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll
index 8052b5e..1b7e3d78 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll
@@ -22,7 +22,7 @@ define i64 @test1(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test1(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -69,7 +69,7 @@ define i64 @test88(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test88(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -110,7 +110,7 @@ define i64 @test87(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test87(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -151,7 +151,7 @@ define i64 @test86(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test86(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -192,7 +192,7 @@ define i64 @test85(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test85(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -233,7 +233,7 @@ define i64 @test84(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test84(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -274,7 +274,7 @@ define i64 @test83(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test83(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -315,7 +315,7 @@ define i64 @test82(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test82(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -356,7 +356,7 @@ define i64 @test81(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test81(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -397,7 +397,7 @@ define i64 @test80(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test80(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -438,7 +438,7 @@ define i64 @test79(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test79(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -479,7 +479,7 @@ define i64 @test78(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test78(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -520,7 +520,7 @@ define i64 @test77(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test77(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -561,7 +561,7 @@ define i64 @test76(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test76(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -610,7 +610,7 @@ define i64 @test75(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test75(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -659,7 +659,7 @@ define i64 @test74(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test74(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1076,7 +1076,7 @@ define i64 @test65(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
@@ -1122,7 +1122,7 @@ define i64 @test64(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -1168,7 +1168,7 @@ define i64 @test63(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0
@@ -1208,7 +1208,7 @@ define i64 @test62(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
@@ -1254,7 +1254,7 @@ define i64 @test61(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -1300,7 +1300,7 @@ define i64 @test60(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0
@@ -1340,7 +1340,7 @@ define i64 @test59(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
@@ -1386,7 +1386,7 @@ define i64 @test58(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
@@ -1431,7 +1431,7 @@ define i64 @test56(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test56(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1472,7 +1472,7 @@ define i64 @test55(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test55(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1513,7 +1513,7 @@ define i64 @test54(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test54(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1554,7 +1554,7 @@ define i64 @test53(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test53(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1595,7 +1595,7 @@ define i64 @test52(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test52(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1634,7 +1634,7 @@ define i64 @test51(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test51(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1675,7 +1675,7 @@ define i64 @test50(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test50(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1716,7 +1716,7 @@ define i64 @test49(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test49(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1775,7 +1775,7 @@ define i64 @test48(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test48(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1816,7 +1816,7 @@ define i64 @test47(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test47(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1857,7 +1857,7 @@ define i64 @test46(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test46(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1898,7 +1898,7 @@ define i64 @test45(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test45(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1938,7 +1938,7 @@ define i64 @test44(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0
@@ -1974,7 +1974,7 @@ define i64 @test43(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test43(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2015,7 +2015,7 @@ define i64 @test42(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test42(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2056,7 +2056,7 @@ define i64 @test41(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test41(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2097,7 +2097,7 @@ define i64 @test40(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test40(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2138,7 +2138,7 @@ define i64 @test39(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test39(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2179,7 +2179,7 @@ define i64 @test38(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test38(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2220,7 +2220,7 @@ define i64 @test37(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test37(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2262,7 +2262,7 @@ define i64 @test36(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0
@@ -2296,7 +2296,7 @@ define i64 @test35(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test35(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2337,7 +2337,7 @@ define i64 @test34(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test34(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2378,7 +2378,7 @@ define i64 @test33(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test33(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2419,7 +2419,7 @@ define i64 @test32(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test32(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2459,7 +2459,7 @@ define i64 @test31(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test31(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2500,7 +2500,7 @@ define i64 @test30(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test30(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2541,7 +2541,7 @@ define i64 @test29(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test29(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2582,7 +2582,7 @@ define i64 @test28(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test28(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2623,7 +2623,7 @@ define i64 @test27(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test27(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2664,7 +2664,7 @@ define i64 @test26(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test26(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2705,7 +2705,7 @@ define void @test25(ptr %p, <1 x i64> %a) nounwind optsize ssp #0 {
; CHECK-LABEL: define void @test25(
; CHECK-SAME: ptr [[P:%.*]], <1 x i64> [[A:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2770,9 +2770,9 @@ define void @test23(<1 x i64> %d, <1 x i64> %n, ptr %p) nounwind optsize ssp #0
; CHECK-LABEL: define void @test23(
; CHECK-SAME: <1 x i64> [[D:%.*]], <1 x i64> [[N:%.*]], ptr [[P:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP4]] to <8 x i8>
@@ -2813,7 +2813,7 @@ define i64 @test22(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test22(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -2922,7 +2922,7 @@ define i64 @test20(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test20(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3051,7 +3051,7 @@ define i64 @test16(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP6]], i32 0
@@ -3192,7 +3192,7 @@ define i64 @test12(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test12(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3233,7 +3233,7 @@ define i64 @test11(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test11(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3274,7 +3274,7 @@ define i64 @test10(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test10(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3315,7 +3315,7 @@ define i64 @test9(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test9(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3357,7 +3357,7 @@ define i64 @test8(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test8(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3398,7 +3398,7 @@ define i64 @test7(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test7(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3457,7 +3457,7 @@ define i64 @test6(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test6(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3504,7 +3504,7 @@ define i64 @test5(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test5(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3551,7 +3551,7 @@ define i64 @test4(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test4(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3598,7 +3598,7 @@ define i64 @test3(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test3(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3645,7 +3645,7 @@ define i64 @test2(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-LABEL: define i64 @test2(
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -3694,7 +3694,7 @@ define <4 x float> @test89(<4 x float> %a, <1 x i64> %b) nounwind #0 {
; CHECK-LABEL: define <4 x float> @test89(
; CHECK-SAME: <4 x float> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -3740,7 +3740,7 @@ define <1 x i64> @test_mm_insert_pi16(<1 x i64> %a.coerce, i32 %d) nounwind #0 {
; CHECK-SAME: <1 x i64> [[A_COERCE:%.*]], i32 [[D:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll
index 017bbcf..e378941 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll
@@ -13,7 +13,7 @@ define void @StoreIntrinsic(ptr %p, <4 x float> %x) nounwind uwtable sanitize_me
; CHECK-LABEL: define void @StoreIntrinsic(
; CHECK-SAME: ptr [[P:%.*]], <4 x float> [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649
@@ -25,8 +25,8 @@ define void @StoreIntrinsic(ptr %p, <4 x float> %x) nounwind uwtable sanitize_me
; ORIGINS-LABEL: define void @StoreIntrinsic(
; ORIGINS-SAME: ptr [[P:%.*]], <4 x float> [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; ORIGINS-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; ORIGINS-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGINS-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
@@ -107,7 +107,7 @@ define <8 x i16> @Pmulhuw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable saniti
; CHECK-LABEL: define <8 x i16> @Pmulhuw128(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
@@ -119,8 +119,8 @@ define <8 x i16> @Pmulhuw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable saniti
; ORIGINS-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
; ORIGINS-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGINS-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGINS-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGINS-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGINS-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; ORIGINS-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP3]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll
index ffad6fb..6b7f813 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll
@@ -7,7 +7,7 @@ target triple = "i386-unknown-linux-gnu"
define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_cmp_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -26,7 +26,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind
define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_cmp_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -47,7 +47,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comieq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -67,7 +67,7 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comige_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -87,7 +87,7 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comigt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -107,7 +107,7 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comile_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -127,7 +127,7 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comilt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -147,7 +147,7 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comineq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -238,7 +238,7 @@ declare void @llvm.x86.sse.ldmxcsr(ptr) nounwind
define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_max_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -255,7 +255,7 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_max_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -273,7 +273,7 @@ declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_min_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -290,7 +290,7 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind read
define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_min_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -415,7 +415,7 @@ declare void @llvm.x86.sse.stmxcsr(ptr) nounwind
define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomieq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -435,7 +435,7 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomige_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -455,7 +455,7 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomigt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -475,7 +475,7 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomile_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -495,7 +495,7 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomilt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -515,7 +515,7 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomineq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll
index 3a37eaf..806eac0 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll
@@ -15,7 +15,7 @@ target triple = "i386-unknown-linux-gnu"
define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_cmp_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -34,7 +34,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounw
define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_cmp_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -55,7 +55,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw
define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comieq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -75,7 +75,7 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comige_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -95,7 +95,7 @@ declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comigt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -115,7 +115,7 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comile_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -135,7 +135,7 @@ declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comilt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -155,7 +155,7 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readno
define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comineq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -356,7 +356,7 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -380,7 +380,7 @@ declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind
define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -415,7 +415,7 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 {
define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, ptr %p1) optsize #0 {
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load_optsize(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -566,7 +566,7 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_max_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -583,7 +583,7 @@ declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_max_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -601,7 +601,7 @@ declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_min_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -618,7 +618,7 @@ declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind
define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_min_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -658,7 +658,7 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packssdw_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer
@@ -693,7 +693,7 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() #0 {
define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packsswb_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
@@ -728,7 +728,7 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() #0 {
define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packuswb_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
@@ -763,7 +763,7 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() #0 {
define <16 x i8> @test_x86_sse2_pavg_b(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pavg_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
@@ -780,7 +780,7 @@ declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse2_pavg_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pavg_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
@@ -797,7 +797,7 @@ declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmadd_wd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
@@ -848,7 +848,7 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmulh_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
@@ -865,7 +865,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmulhu_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
@@ -882,7 +882,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnon
define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psad_bw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
@@ -903,7 +903,7 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -926,7 +926,7 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -949,7 +949,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
@@ -1023,7 +1023,7 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -1046,7 +1046,7 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psra_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
@@ -1103,7 +1103,7 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -1126,7 +1126,7 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -1149,7 +1149,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
@@ -1171,7 +1171,7 @@ declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, ptr %p) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_w_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -1257,7 +1257,7 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomieq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -1277,7 +1277,7 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomige_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -1297,7 +1297,7 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomigt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -1317,7 +1317,7 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomile_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -1337,7 +1337,7 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomilt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -1357,7 +1357,7 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomineq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
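
Every hunk above applies the same mechanical rewrite: the constant shadow-TLS address for a parameter at byte offset N is now printed as a getelementptr over i8 instead of an inttoptr/ptrtoint round-trip. Both spellings denote the same address. A minimal self-contained sketch of the new form, assuming the runtime's usual 800-byte [100 x i64] layout for @__msan_param_tls (an assumption for illustration; these tests reference the global only as an opaque ptr, and the function and value names below are not from this commit):

@__msan_param_tls = external thread_local global [100 x i64]

define <8 x i16> @param_shadow_sketch() {
  ; The address previously spelled as
  ;   inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr)
  ; is the same as this constant byte-wise GEP, 16 bytes past the base:
  %shadow = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
  ret <8 x i16> %shadow
}
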
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll
index e51c533..24f22bd 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll
@@ -6,8 +6,8 @@ target triple = "i386-unknown-linux-gnu"
define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_blendvpd(
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -35,8 +35,8 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_blendvps(
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -65,7 +65,7 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa
define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_dppd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -87,7 +87,7 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi
define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_dpps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
@@ -109,7 +109,7 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind
define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_insertps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
@@ -136,7 +136,7 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw
define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_mpsadbw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
@@ -161,7 +161,7 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind rea
define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_mpsadbw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -197,7 +197,7 @@ define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 {
define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_packusdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer
@@ -231,8 +231,8 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() #0 {
define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_pblendvb(
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -273,7 +273,7 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestc(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -293,7 +293,7 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestnzc(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -313,7 +313,7 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestz(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
@@ -363,7 +363,7 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_round_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP2]], <2 x i32> <i32 2, i32 1>
@@ -379,7 +379,7 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n
define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_round_sd_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
@@ -407,7 +407,7 @@ define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0
define <4 x float> @test_x86_sse41_round_ss_load(<4 x float> %a0, ptr %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_round_ss_load(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll
index 7bc9cf3..436a3b3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll
@@ -13,205 +13,205 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 592) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8
+; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8
; CHECK-NEXT: store i64 1040, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
index 2745939..cc2d94c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
@@ -18,12 +18,12 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) @sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]])
@@ -37,12 +37,12 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
; ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; ORIGIN-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; ORIGIN-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) @sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]])
@@ -58,12 +58,12 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
; ORIGIN2-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: call void @llvm.donothing()
; ORIGIN2-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; ORIGIN2-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; ORIGIN2-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; ORIGIN2-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) @sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]])
@@ -446,12 +446,12 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr {
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; CHECK-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; CHECK-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i80 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) @sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]])
@@ -465,12 +465,12 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr {
; ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; ORIGIN-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8
-; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; ORIGIN-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: store i80 0, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) @sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]])
@@ -486,12 +486,12 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr {
; ORIGIN2-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: call void @llvm.donothing()
; ORIGIN2-NEXT: store i32 0, ptr @__msan_param_tls, align 8
-; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; ORIGIN2-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8
-; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; ORIGIN2-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: store i80 0, ptr @__msan_retval_tls, align 8
; ORIGIN2-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) @sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
index 74a6276..681b331 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
@@ -40,8 +40,8 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef signext %arg) sanit
; CHECK-NEXT: [[_MSPROP:%.*]] = sext i8 [[_MSLD]] to i32
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32
; CHECK-NEXT: store i8 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: store i32 [[_MSPROP]], ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef signext [[TMP7]], i32 noundef 1, i32 noundef [[CONV]])
@@ -82,8 +82,8 @@ define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_mem
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]])
@@ -125,8 +125,8 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m
; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[TMP7]] to double
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: store i64 [[TMP11]], ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]])
@@ -167,8 +167,8 @@ define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]])
@@ -208,8 +208,8 @@ define linkonce_odr dso_local void @_Z4testIeEvT_(x86_fp80 noundef %arg) sanitiz
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i80, ptr [[TMP10]], align 16
; CHECK-NEXT: store i80 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i80 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: store i80 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (x86_fp80, i32, ...) @_Z5test2IeEvT_iz(x86_fp80 noundef [[TMP7]], i32 noundef 1, x86_fp80 noundef [[TMP7]])
@@ -249,8 +249,8 @@ define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitiz
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]])
@@ -271,7 +271,7 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_INT64INT64:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
@@ -302,12 +302,12 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i64, i64, i32, ...) @_Z5test2I10Int64Int64EvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
@@ -330,7 +330,7 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEDOUBLE:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
@@ -361,12 +361,12 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, double, i32, ...) @_Z5test2I12DoubleDoubleEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
@@ -399,11 +399,11 @@ define linkonce_odr dso_local void @_Z4testI7Double4EvT_(ptr noundef byval(%stru
; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false)
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP8]], i64 32, i1 false)
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649
; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
@@ -426,7 +426,7 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEFLOAT:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
@@ -457,12 +457,12 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
-; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, float, i32, ...) @_Z5test2I11DoubleFloatEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
@@ -495,11 +495,11 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_(ptr noundef byval(
; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false)
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP8]], i64 32, i1 false)
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649
; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
@@ -530,11 +530,11 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_(ptr noundef byval(
; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false)
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 72), ptr align 8 [[TMP8]], i64 64, i1 false)
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649
; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
@@ -1103,51 +1103,51 @@ define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval
; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false)
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 72), ptr align 8 [[TMP8]], i64 64, i1 false)
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 136), ptr align 8 [[TMP11]], i64 64, i1 false)
; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], -2147483649
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), ptr align 8 [[TMP14]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 200), ptr align 8 [[TMP14]], i64 64, i1 false)
; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], -2147483649
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), ptr align 8 [[TMP17]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 264), ptr align 8 [[TMP17]], i64 64, i1 false)
; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -2147483649
; CHECK-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), ptr align 8 [[TMP20]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 328), ptr align 8 [[TMP20]], i64 64, i1 false)
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = and i64 [[TMP21]], -2147483649
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), ptr align 8 [[TMP23]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 392), ptr align 8 [[TMP23]], i64 64, i1 false)
; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP25:%.*]] = and i64 [[TMP24]], -2147483649
; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), ptr align 8 [[TMP26]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 456), ptr align 8 [[TMP26]], i64 64, i1 false)
; CHECK-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], -2147483649
; CHECK-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), ptr align 8 [[TMP29]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 520), ptr align 8 [[TMP29]], i64 64, i1 false)
; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP31:%.*]] = and i64 [[TMP30]], -2147483649
; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), ptr align 8 [[TMP32]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 584), ptr align 8 [[TMP32]], i64 64, i1 false)
; CHECK-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP34:%.*]] = and i64 [[TMP33]], -2147483649
; CHECK-NEXT: [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), ptr align 8 [[TMP35]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 648), ptr align 8 [[TMP35]], i64 64, i1 false)
; CHECK-NEXT: [[TMP36:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP37:%.*]] = and i64 [[TMP36]], -2147483649
; CHECK-NEXT: [[TMP38:%.*]] = inttoptr i64 [[TMP37]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), ptr align 8 [[TMP38]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 712), ptr align 8 [[TMP38]], i64 64, i1 false)
; CHECK-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP42:%.*]] = and i64 [[TMP41]], -2147483649
; CHECK-NEXT: [[TMP43:%.*]] = inttoptr i64 [[TMP42]] to ptr
@@ -1155,47 +1155,47 @@ define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval
; CHECK-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP45:%.*]] = and i64 [[TMP44]], -2147483649
; CHECK-NEXT: [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), ptr align 8 [[TMP46]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), ptr align 8 [[TMP46]], i64 64, i1 false)
; CHECK-NEXT: [[TMP47:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP48:%.*]] = and i64 [[TMP47]], -2147483649
; CHECK-NEXT: [[TMP49:%.*]] = inttoptr i64 [[TMP48]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), ptr align 8 [[TMP49]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), ptr align 8 [[TMP49]], i64 64, i1 false)
; CHECK-NEXT: [[TMP50:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP51:%.*]] = and i64 [[TMP50]], -2147483649
; CHECK-NEXT: [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), ptr align 8 [[TMP52]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), ptr align 8 [[TMP52]], i64 64, i1 false)
; CHECK-NEXT: [[TMP53:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP54:%.*]] = and i64 [[TMP53]], -2147483649
; CHECK-NEXT: [[TMP55:%.*]] = inttoptr i64 [[TMP54]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), ptr align 8 [[TMP55]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), ptr align 8 [[TMP55]], i64 64, i1 false)
; CHECK-NEXT: [[TMP56:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP57:%.*]] = and i64 [[TMP56]], -2147483649
; CHECK-NEXT: [[TMP58:%.*]] = inttoptr i64 [[TMP57]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), ptr align 8 [[TMP58]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), ptr align 8 [[TMP58]], i64 64, i1 false)
; CHECK-NEXT: [[TMP59:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP60:%.*]] = and i64 [[TMP59]], -2147483649
; CHECK-NEXT: [[TMP61:%.*]] = inttoptr i64 [[TMP60]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), ptr align 8 [[TMP61]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), ptr align 8 [[TMP61]], i64 64, i1 false)
; CHECK-NEXT: [[TMP62:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP63:%.*]] = and i64 [[TMP62]], -2147483649
; CHECK-NEXT: [[TMP64:%.*]] = inttoptr i64 [[TMP63]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), ptr align 8 [[TMP64]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), ptr align 8 [[TMP64]], i64 64, i1 false)
; CHECK-NEXT: [[TMP65:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP66:%.*]] = and i64 [[TMP65]], -2147483649
; CHECK-NEXT: [[TMP67:%.*]] = inttoptr i64 [[TMP66]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), ptr align 8 [[TMP67]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), ptr align 8 [[TMP67]], i64 64, i1 false)
; CHECK-NEXT: [[TMP68:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP69:%.*]] = and i64 [[TMP68]], -2147483649
; CHECK-NEXT: [[TMP70:%.*]] = inttoptr i64 [[TMP69]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), ptr align 8 [[TMP70]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), ptr align 8 [[TMP70]], i64 64, i1 false)
; CHECK-NEXT: [[TMP71:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP72:%.*]] = and i64 [[TMP71]], -2147483649
; CHECK-NEXT: [[TMP73:%.*]] = inttoptr i64 [[TMP72]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), ptr align 8 [[TMP73]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), ptr align 8 [[TMP73]], i64 64, i1 false)
; CHECK-NEXT: [[TMP74:%.*]] = ptrtoint ptr [[ARG]] to i64
; CHECK-NEXT: [[TMP75:%.*]] = and i64 [[TMP74]], -2147483649
; CHECK-NEXT: [[TMP76:%.*]] = inttoptr i64 [[TMP75]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), ptr align 8 [[TMP76]], i64 64, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), ptr align 8 [[TMP76]], i64 64, i1 false)
; CHECK-NEXT: store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 20, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]])
; CHECK-NEXT: ret void
diff --git a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
index ff37605..3ac6844 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
@@ -19,7 +19,7 @@ declare void @llvm.masked.compressstore.v16f32(<16 x float>, ptr, <16 x i1>)
define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory {
; CHECK-LABEL: @Store(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
@@ -30,9 +30,9 @@ define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory {
;
; ADDR-LABEL: @Store(
; ADDR-NEXT: entry:
-; ADDR-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; ADDR-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -52,8 +52,8 @@ define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory {
;
; ORIGINS-LABEL: @Store(
; ORIGINS-NEXT: entry:
-; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
@@ -88,7 +88,7 @@ entry:
define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory {
; CHECK-LABEL: @Load(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
@@ -101,8 +101,8 @@ define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memo
; ADDR-LABEL: @Load(
; ADDR-NEXT: entry:
; ADDR-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; ADDR-NEXT: [[TMP1:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; ADDR-NEXT: [[TMP1:%.*]] = load <4 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8
+; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -123,8 +123,8 @@ define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memo
;
; ORIGINS-LABEL: @Load(
; ORIGINS-NEXT: entry:
-; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
@@ -235,7 +235,7 @@ entry:
; FIXME: Provide real implementation.
define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory {
; CHECK-LABEL: @Gather(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i64> [[TMP2]], splat (i64 87960930222080)
@@ -246,9 +246,9 @@ define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %pas
; CHECK-NEXT: ret <16 x float> [[RET]]
;
; ADDR-LABEL: @Gather(
-; ADDR-NEXT: [[TMP1:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; ADDR-NEXT: [[TMP1:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr @__msan_param_tls, align 8
-; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> [[TMP2]], <16 x i64> zeroinitializer
; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
@@ -270,8 +270,8 @@ define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %pas
; ADDR-NEXT: ret <16 x float> [[RET]]
;
; ORIGINS-LABEL: @Gather(
-; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
-; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 136) to ptr), align 4
+; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8
+; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 136), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
; ORIGINS-NEXT: [[TMP4:%.*]] = xor <16 x i64> [[TMP3]], splat (i64 87960930222080)
@@ -326,8 +326,8 @@ define void @Scatter(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask) sanitize
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @Scatter(
-; ADDR-NEXT: [[TMP1:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
-; ADDR-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; ADDR-NEXT: [[TMP1:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8
+; ADDR-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; ADDR-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> [[TMP2]], <8 x i64> zeroinitializer
@@ -403,7 +403,7 @@ define void @ScatterNoSanitize(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask
; FIXME: Provide real implementation.
define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory {
; CHECK-LABEL: @ExpandLoad(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
@@ -415,8 +415,8 @@ define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthr
;
; ADDR-LABEL: @ExpandLoad(
; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; ADDR-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
@@ -436,8 +436,8 @@ define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthr
; ADDR-NEXT: ret <16 x float> [[RET]]
;
; ORIGINS-LABEL: @ExpandLoad(
-; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; ORIGINS-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -492,8 +492,8 @@ define void @CompressStore(<16 x float> %value, ptr %ptr, <16 x i1> %mask) sanit
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @CompressStore(
-; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
+; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8
; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
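The test updates above and below are mechanical: MemorySanitizer's expected output now addresses its parameter and origin TLS shadow slots with a byte-offset getelementptr constant instead of the equivalent inttoptr/add/ptrtoint constant expression. Both spellings denote the same address; a minimal before/after sketch, reusing the @__msan_param_tls global and the 8-byte offset that recur in these hunks:

    ; old form: round-trip through integers to add the byte offset
    %old = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
    ; new form: an i8 GEP applies the same 8-byte offset while staying in pointer form
    %new = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8

The GEP form keeps the computation as pointer arithmetic, which, presumably, is easier for later passes to fold and reason about than the integer round-trip.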
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
index b4feb1e..0ad9e4d 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -18,7 +18,7 @@ define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
; CHECK-LABEL: define void @Store(
; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
@@ -30,8 +30,8 @@ define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
; ORIGIN-LABEL: define void @Store(
; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
@@ -53,8 +53,8 @@ define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
; CALLS-NEXT: [[ENTRY:.*:]]
; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
@@ -80,7 +80,7 @@ define void @AlignedStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_me
; CHECK-LABEL: define void @AlignedStore(
; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
@@ -92,8 +92,8 @@ define void @AlignedStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_me
; ORIGIN-LABEL: define void @AlignedStore(
; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
@@ -118,8 +118,8 @@ define void @AlignedStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_me
; CALLS-NEXT: [[ENTRY:.*:]]
; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
@@ -353,7 +353,7 @@ define void @FuncWithPhi(ptr nocapture %a, ptr %b, ptr nocapture %c) nounwind uw
; CHECK-LABEL: define void @FuncWithPhi(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 0
@@ -397,8 +397,8 @@ define void @FuncWithPhi(ptr nocapture %a, ptr %b, ptr nocapture %c) nounwind uw
; ORIGIN-LABEL: define void @FuncWithPhi(
; ORIGIN-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 0
@@ -457,10 +457,10 @@ define void @FuncWithPhi(ptr nocapture %a, ptr %b, ptr nocapture %c) nounwind uw
; CALLS-LABEL: define void @FuncWithPhi(
; CALLS-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; CALLS-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; CALLS-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; CALLS-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CALLS-NEXT: call void @llvm.donothing()
@@ -770,8 +770,8 @@ define void @SExt(ptr nocapture %a, ptr nocapture %b) nounwind uwtable sanitize_
; CALLS-LABEL: define void @SExt(
; CALLS-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CALLS-NEXT: call void @llvm.donothing()
@@ -844,7 +844,7 @@ define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitiz
; CHECK-LABEL: define void @MemCpy(
; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
; CHECK-NEXT: ret void
@@ -852,8 +852,8 @@ define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitiz
; ORIGIN-LABEL: define void @MemCpy(
; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
; ORIGIN-NEXT: ret void
@@ -861,8 +861,8 @@ define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitiz
; CALLS-LABEL: define void @MemCpy(
; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
; CALLS-NEXT: ret void
@@ -911,7 +911,7 @@ define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable s
; CHECK-LABEL: define void @MemCpyInline(
; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
; CHECK-NEXT: ret void
@@ -919,8 +919,8 @@ define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable s
; ORIGIN-LABEL: define void @MemCpyInline(
; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
; ORIGIN-NEXT: ret void
@@ -928,8 +928,8 @@ define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable s
; CALLS-LABEL: define void @MemCpyInline(
; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
; CALLS-NEXT: ret void
@@ -947,7 +947,7 @@ define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable saniti
; CHECK-LABEL: define void @MemMove(
; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10)
; CHECK-NEXT: ret void
@@ -955,8 +955,8 @@ define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable saniti
; ORIGIN-LABEL: define void @MemMove(
; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10)
; ORIGIN-NEXT: ret void
@@ -964,8 +964,8 @@ define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable saniti
; CALLS-LABEL: define void @MemMove(
; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10)
; CALLS-NEXT: ret void
@@ -1065,9 +1065,9 @@ define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_mem
; CHECK-LABEL: define i32 @Select(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[C]], i32 [[TMP1]], i32 [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[A]], [[B]]
@@ -1081,12 +1081,12 @@ define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_mem
; ORIGIN-LABEL: define i32 @Select(
; ORIGIN-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[C]], i32 [[TMP2]], i32 [[TMP4]]
; ORIGIN-NEXT: [[TMP7:%.*]] = xor i32 [[A]], [[B]]
@@ -1103,12 +1103,12 @@ define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_mem
; CALLS-LABEL: define i32 @Select(
; CALLS-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP6:%.*]] = select i1 [[C]], i32 [[TMP2]], i32 [[TMP4]]
; CALLS-NEXT: [[TMP7:%.*]] = xor i32 [[A]], [[B]]
@@ -1135,9 +1135,9 @@ define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind
; CHECK-LABEL: define <8 x i16> @SelectVector(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i16> [[A]], [[B]]
@@ -1151,12 +1151,12 @@ define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind
; ORIGIN-LABEL: define <8 x i16> @SelectVector(
; ORIGIN-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[C:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; ORIGIN-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGIN-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP6:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]]
; ORIGIN-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]]
@@ -1177,12 +1177,12 @@ define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind
; CALLS-LABEL: define <8 x i16> @SelectVector(
; CALLS-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[C:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; CALLS-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CALLS-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP6:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]]
; CALLS-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]]
@@ -1213,9 +1213,9 @@ define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwta
; CHECK-LABEL: define <8 x i16> @SelectVector2(
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[C]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i16> [[A]], [[B]]
@@ -1229,12 +1229,12 @@ define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwta
; ORIGIN-LABEL: define <8 x i16> @SelectVector2(
; ORIGIN-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; ORIGIN-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGIN-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]]
; ORIGIN-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]]
@@ -1251,12 +1251,12 @@ define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwta
; CALLS-LABEL: define <8 x i16> @SelectVector2(
; CALLS-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; CALLS-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CALLS-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP6:%.*]] = select i1 [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]]
; CALLS-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]]
@@ -1280,8 +1280,8 @@ define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %
; CHECK-SAME: i1 zeroext [[X:%.*]], { i64, i64 } [[A:%.*]], { i64, i64 } [[B:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[X]], { i64, i64 } [[TMP1]], { i64, i64 } [[TMP2]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP3]]
@@ -1294,10 +1294,10 @@ define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %
; ORIGIN-NEXT: [[ENTRY:.*:]]
; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; ORIGIN-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; ORIGIN-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; ORIGIN-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]]
; ORIGIN-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]]
@@ -1313,10 +1313,10 @@ define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %
; CALLS-NEXT: [[ENTRY:.*:]]
; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; CALLS-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CALLS-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; CALLS-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]]
; CALLS-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]]
@@ -1337,8 +1337,8 @@ define { ptr, double } @SelectStruct2(i1 zeroext %x, { ptr, double } %a, { ptr,
; CHECK-SAME: i1 zeroext [[X:%.*]], { ptr, double } [[A:%.*]], { ptr, double } [[B:%.*]]) #[[ATTR6]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[X]], { i64, i64 } [[TMP1]], { i64, i64 } [[TMP2]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP3]]
@@ -1351,10 +1351,10 @@ define { ptr, double } @SelectStruct2(i1 zeroext %x, { ptr, double } %a, { ptr,
; ORIGIN-NEXT: [[ENTRY:.*:]]
; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; ORIGIN-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; ORIGIN-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; ORIGIN-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]]
; ORIGIN-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]]
@@ -1370,10 +1370,10 @@ define { ptr, double } @SelectStruct2(i1 zeroext %x, { ptr, double } %a, { ptr,
; CALLS-NEXT: [[ENTRY:.*:]]
; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; CALLS-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CALLS-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; CALLS-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]]
; CALLS-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]]
@@ -1475,7 +1475,7 @@ define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
; CHECK-LABEL: define i32 @Div(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP0]], 0
@@ -1491,8 +1491,8 @@ define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
; ORIGIN-LABEL: define i32 @Div(
; ORIGIN-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; ORIGIN-NEXT: call void @llvm.donothing()
@@ -1510,8 +1510,8 @@ define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
; CALLS-LABEL: define i32 @Div(
; CALLS-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CALLS-NEXT: call void @llvm.donothing()
@@ -1533,7 +1533,7 @@ define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory
; CHECK-SAME: float [[A:%.*]], float [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[C:%.*]] = fdiv float [[A]], [[B]]
@@ -1545,8 +1545,8 @@ define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory
; ORIGIN-NEXT: [[ENTRY:.*:]]
; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP2]]
; ORIGIN-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], 0
@@ -1561,8 +1561,8 @@ define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory
; CALLS-NEXT: [[ENTRY:.*:]]
; CALLS-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP2]]
; CALLS-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], 0
@@ -2416,7 +2416,7 @@ define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
; CHECK-LABEL: define i32 @ExtractElement(
; CHECK-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP2]], i32 [[IDX]]
@@ -2432,8 +2432,8 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
;
; ORIGIN-LABEL: define i32 @ExtractElement(
; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] {
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; ORIGIN-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; ORIGIN-NEXT: call void @llvm.donothing()
@@ -2451,8 +2451,8 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
;
; CALLS-LABEL: define i32 @ExtractElement(
; CALLS-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] {
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; CALLS-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CALLS-NEXT: call void @llvm.donothing()
@@ -2470,9 +2470,9 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @InsertElement(
; CHECK-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[TMP3]], i32 [[IDX]]
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
@@ -2487,12 +2487,12 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memor
;
; ORIGIN-LABEL: define <4 x i32> @InsertElement(
; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] {
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; ORIGIN-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; ORIGIN-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX]]
; ORIGIN-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0
@@ -2512,12 +2512,12 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memor
;
; CALLS-LABEL: define <4 x i32> @InsertElement(
; CALLS-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] {
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; CALLS-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CALLS-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CALLS-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX]]
; CALLS-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0
@@ -2538,7 +2538,7 @@ define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory
; CHECK-LABEL: define <4 x i32> @ShuffleVector(
; CHECK-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
; CHECK-NEXT: [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC]], <4 x i32> [[VEC1]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -2549,8 +2549,8 @@ define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory
; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] {
; ORIGIN-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGIN-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
; ORIGIN-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
@@ -2565,8 +2565,8 @@ define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory
; CALLS-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] {
; CALLS-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CALLS-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
; CALLS-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
@@ -2761,17 +2761,13 @@ define void @VAStart(i32 %x, ...) sanitize_memory {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP16]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[VA]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 16
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[VA]], i64 16
; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[VA]] to i64
-; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[TMP24]], 8
-; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[VA]], i64 8
; CHECK-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8
; CHECK-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[TMP27]] to i64
; CHECK-NEXT: [[TMP29:%.*]] = xor i64 [[TMP28]], 87960930222080
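
Note on the VAStart hunks: the rewrite collapses the three-instruction
ptrtoint/add/inttoptr sequence into a single byte-offset getelementptr.
A minimal stand-alone sketch of the equivalence (function names are
illustrative, not from this patch); the i8-typed GEP is the canonical
byte-offset form under opaque pointers and, unlike the round-trip
through i64, keeps pointer provenance:

define ptr @offset_old(ptr %va) {
  ; old form: three instructions through an integer round-trip
  %1 = ptrtoint ptr %va to i64
  %2 = add i64 %1, 16
  %3 = inttoptr i64 %2 to ptr
  ret ptr %3
}

define ptr @offset_new(ptr %va) {
  ; new form: one instruction, same address, provenance preserved
  %1 = getelementptr i8, ptr %va, i64 16
  ret ptr %1
}
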
@@ -2832,9 +2828,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory {
; ORIGIN-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 24, i1 false)
; ORIGIN-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
-; ORIGIN-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[VA]] to i64
-; ORIGIN-NEXT: [[TMP32:%.*]] = add i64 [[TMP31]], 16
-; ORIGIN-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr
+; ORIGIN-NEXT: [[TMP33:%.*]] = getelementptr i8, ptr [[VA]], i64 16
; ORIGIN-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8
; ORIGIN-NEXT: [[TMP35:%.*]] = ptrtoint ptr [[TMP34]] to i64
; ORIGIN-NEXT: [[TMP36:%.*]] = xor i64 [[TMP35]], 87960930222080
@@ -2843,9 +2837,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory {
; ORIGIN-NEXT: [[TMP39:%.*]] = inttoptr i64 [[TMP38]] to ptr
; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP2]], i64 176, i1 false)
; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP39]], ptr align 16 [[TMP4]], i64 176, i1 false)
-; ORIGIN-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[VA]] to i64
-; ORIGIN-NEXT: [[TMP41:%.*]] = add i64 [[TMP40]], 8
-; ORIGIN-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
+; ORIGIN-NEXT: [[TMP42:%.*]] = getelementptr i8, ptr [[VA]], i64 8
; ORIGIN-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8
; ORIGIN-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64
; ORIGIN-NEXT: [[TMP45:%.*]] = xor i64 [[TMP44]], 87960930222080
@@ -2905,9 +2897,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory {
; CALLS-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
; CALLS-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP26]], i8 0, i64 24, i1 false)
; CALLS-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
-; CALLS-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[VA]] to i64
-; CALLS-NEXT: [[TMP30:%.*]] = add i64 [[TMP29]], 16
-; CALLS-NEXT: [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr
+; CALLS-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[VA]], i64 16
; CALLS-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8
; CALLS-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64
; CALLS-NEXT: [[TMP34:%.*]] = xor i64 [[TMP33]], 87960930222080
@@ -2916,9 +2906,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory {
; CALLS-NEXT: [[TMP37:%.*]] = inttoptr i64 [[TMP36]] to ptr
; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP35]], ptr align 16 [[TMP2]], i64 176, i1 false)
; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP4]], i64 176, i1 false)
-; CALLS-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[VA]] to i64
-; CALLS-NEXT: [[TMP39:%.*]] = add i64 [[TMP38]], 8
-; CALLS-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CALLS-NEXT: [[TMP40:%.*]] = getelementptr i8, ptr [[VA]], i64 8
; CALLS-NEXT: [[TMP41:%.*]] = load ptr, ptr [[TMP40]], align 8
; CALLS-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[TMP41]] to i64
; CALLS-NEXT: [[TMP43:%.*]] = xor i64 [[TMP42]], 87960930222080
@@ -2948,7 +2936,7 @@ define void @VolatileStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_m
; CHECK-LABEL: define void @VolatileStore(
; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
@@ -2960,8 +2948,8 @@ define void @VolatileStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_m
; ORIGIN-LABEL: define void @VolatileStore(
; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
@@ -2983,8 +2971,8 @@ define void @VolatileStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_m
; CALLS-NEXT: [[ENTRY:.*:]]
; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
@@ -3333,7 +3321,7 @@ define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory
; CHECK-LABEL: define <2 x i64> @ArgumentShadowAlignment(
; CHECK-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x i64> [[B]]
@@ -3341,8 +3329,8 @@ define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory
; ORIGIN-LABEL: define <2 x i64> @ArgumentShadowAlignment(
; ORIGIN-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] {
; ORIGIN-NEXT: [[ENTRY:.*:]]
-; ORIGIN-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
@@ -3351,8 +3339,8 @@ define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory
; CALLS-LABEL: define <2 x i64> @ArgumentShadowAlignment(
; CALLS-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] {
; CALLS-NEXT: [[ENTRY:.*:]]
-; CALLS-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
; CALLS-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
@@ -3371,7 +3359,7 @@ define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
; CHECK-SAME: i64 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR6]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0
; CHECK-NEXT: [[A:%.*]] = insertvalue { i64, i32 } undef, i64 [[X]], 0
@@ -3385,8 +3373,8 @@ define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
; ORIGIN-NEXT: [[ENTRY:.*:]]
; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP4:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0
; ORIGIN-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP0]], 0
@@ -3405,8 +3393,8 @@ define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
; CALLS-NEXT: [[ENTRY:.*:]]
; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CALLS-NEXT: call void @llvm.donothing()
; CALLS-NEXT: [[TMP4:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0
; CALLS-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP0]], 0
@@ -3458,22 +3446,22 @@ define void @VAArgStruct(ptr nocapture %s) sanitize_memory {
; CHECK-NEXT: [[_MSLD2:%.*]] = load i64, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
; CHECK-NEXT: store i32 -1, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080
; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP13]], i64 16, i1 false)
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP13]], i64 16, i1 false)
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP16]], i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP16]], i64 16, i1 false)
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
; CHECK-NEXT: ret void
@@ -3515,48 +3503,48 @@ define void @VAArgStruct(ptr nocapture %s) sanitize_memory {
; ORIGIN-NEXT: [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
; ORIGIN-NEXT: store i32 -1, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: store i32 0, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
-; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
-; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; ORIGIN-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
+; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; ORIGIN-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
+; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; ORIGIN-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; ORIGIN-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; ORIGIN-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
; ORIGIN-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; ORIGIN-NEXT: [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416
; ORIGIN-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false)
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false)
-; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP23]], i64 16, i1 false)
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 40), ptr align 4 [[TMP25]], i64 16, i1 false)
+; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; ORIGIN-NEXT: [[TMP26:%.*]] = zext i32 [[TMP13]] to i64
; ORIGIN-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 32
; ORIGIN-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]]
-; ORIGIN-NEXT: store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; ORIGIN-NEXT: store i64 [[TMP28]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 8), align 8
+; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; ORIGIN-NEXT: [[TMP29:%.*]] = zext i32 [[TMP19]] to i64
; ORIGIN-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 32
; ORIGIN-NEXT: [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]]
-; ORIGIN-NEXT: store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
+; ORIGIN-NEXT: store i64 [[TMP31]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 16), align 8
+; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
; ORIGIN-NEXT: [[TMP32:%.*]] = zext i32 [[TMP13]] to i64
; ORIGIN-NEXT: [[TMP33:%.*]] = shl i64 [[TMP32]], 32
; ORIGIN-NEXT: [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]]
-; ORIGIN-NEXT: store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; ORIGIN-NEXT: store i64 [[TMP34]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 24), align 8
+; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; ORIGIN-NEXT: [[TMP35:%.*]] = zext i32 [[TMP19]] to i64
; ORIGIN-NEXT: [[TMP36:%.*]] = shl i64 [[TMP35]], 32
; ORIGIN-NEXT: [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]]
-; ORIGIN-NEXT: store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8
+; ORIGIN-NEXT: store i64 [[TMP37]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 32), align 8
; ORIGIN-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; ORIGIN-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080
; ORIGIN-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
; ORIGIN-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416
; ORIGIN-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false)
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false)
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP40]], i64 16, i1 false)
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 176), ptr align 8 [[TMP42]], i64 16, i1 false)
; ORIGIN-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
; ORIGIN-NEXT: ret void
@@ -3600,48 +3588,48 @@ define void @VAArgStruct(ptr nocapture %s) sanitize_memory {
; CALLS-NEXT: [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
; CALLS-NEXT: store i32 -1, ptr @__msan_param_tls, align 8
; CALLS-NEXT: store i32 0, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
-; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CALLS-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
-; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CALLS-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
+; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CALLS-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
+; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CALLS-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; CALLS-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; CALLS-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
; CALLS-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CALLS-NEXT: [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416
; CALLS-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false)
-; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false)
-; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP23]], i64 16, i1 false)
+; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 40), ptr align 4 [[TMP25]], i64 16, i1 false)
+; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CALLS-NEXT: [[TMP26:%.*]] = zext i32 [[TMP13]] to i64
; CALLS-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 32
; CALLS-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]]
-; CALLS-NEXT: store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CALLS-NEXT: store i64 [[TMP28]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 8), align 8
+; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CALLS-NEXT: [[TMP29:%.*]] = zext i32 [[TMP19]] to i64
; CALLS-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 32
; CALLS-NEXT: [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]]
-; CALLS-NEXT: store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
+; CALLS-NEXT: store i64 [[TMP31]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 16), align 8
+; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
; CALLS-NEXT: [[TMP32:%.*]] = zext i32 [[TMP13]] to i64
; CALLS-NEXT: [[TMP33:%.*]] = shl i64 [[TMP32]], 32
; CALLS-NEXT: [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]]
-; CALLS-NEXT: store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
-; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; CALLS-NEXT: store i64 [[TMP34]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 24), align 8
+; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; CALLS-NEXT: [[TMP35:%.*]] = zext i32 [[TMP19]] to i64
; CALLS-NEXT: [[TMP36:%.*]] = shl i64 [[TMP35]], 32
; CALLS-NEXT: [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]]
-; CALLS-NEXT: store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8
+; CALLS-NEXT: store i64 [[TMP37]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 32), align 8
; CALLS-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; CALLS-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080
; CALLS-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
; CALLS-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416
; CALLS-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
-; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false)
-; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false)
+; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP40]], i64 16, i1 false)
+; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 176), ptr align 8 [[TMP42]], i64 16, i1 false)
; CALLS-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CALLS-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
; CALLS-NEXT: ret void
@@ -3685,22 +3673,22 @@ define void @VAArgStructNoSSE(ptr nocapture %s) sanitize_memory #0 {
; CHECK-NEXT: [[_MSLD2:%.*]] = load i64, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
; CHECK-NEXT: store i32 -1, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080
; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP13]], i64 16, i1 false)
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP13]], i64 16, i1 false)
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
+; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
+; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP16]], i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), ptr align 8 [[TMP16]], i64 16, i1 false)
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
; CHECK-NEXT: ret void
@@ -3742,48 +3730,48 @@ define void @VAArgStructNoSSE(ptr nocapture %s) sanitize_memory #0 {
; ORIGIN-NEXT: [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
; ORIGIN-NEXT: store i32 -1, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: store i32 0, ptr @__msan_param_origin_tls, align 4
-; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
-; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
-; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; ORIGIN-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; ORIGIN-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
+; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; ORIGIN-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
+; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; ORIGIN-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; ORIGIN-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; ORIGIN-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
; ORIGIN-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; ORIGIN-NEXT: [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416
; ORIGIN-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false)
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false)
-; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP23]], i64 16, i1 false)
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 40), ptr align 4 [[TMP25]], i64 16, i1 false)
+; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; ORIGIN-NEXT: [[TMP26:%.*]] = zext i32 [[TMP13]] to i64
; ORIGIN-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 32
; ORIGIN-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]]
-; ORIGIN-NEXT: store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; ORIGIN-NEXT: store i64 [[TMP28]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 8), align 8
+; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; ORIGIN-NEXT: [[TMP29:%.*]] = zext i32 [[TMP19]] to i64
; ORIGIN-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 32
; ORIGIN-NEXT: [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]]
-; ORIGIN-NEXT: store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
+; ORIGIN-NEXT: store i64 [[TMP31]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 16), align 8
+; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
; ORIGIN-NEXT: [[TMP32:%.*]] = zext i32 [[TMP13]] to i64
; ORIGIN-NEXT: [[TMP33:%.*]] = shl i64 [[TMP32]], 32
; ORIGIN-NEXT: [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]]
-; ORIGIN-NEXT: store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; ORIGIN-NEXT: store i64 [[TMP34]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 24), align 8
+; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; ORIGIN-NEXT: [[TMP35:%.*]] = zext i32 [[TMP19]] to i64
; ORIGIN-NEXT: [[TMP36:%.*]] = shl i64 [[TMP35]], 32
; ORIGIN-NEXT: [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]]
-; ORIGIN-NEXT: store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8
+; ORIGIN-NEXT: store i64 [[TMP37]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 32), align 8
; ORIGIN-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; ORIGIN-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080
; ORIGIN-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
; ORIGIN-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416
; ORIGIN-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false)
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 48) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false)
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), ptr align 8 [[TMP40]], i64 16, i1 false)
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 48), ptr align 8 [[TMP42]], i64 16, i1 false)
; ORIGIN-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
; ORIGIN-NEXT: ret void
@@ -3827,48 +3815,48 @@ define void @VAArgStructNoSSE(ptr nocapture %s) sanitize_memory #0 {
; CALLS-NEXT: [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
; CALLS-NEXT: store i32 -1, ptr @__msan_param_tls, align 8
; CALLS-NEXT: store i32 0, ptr @__msan_param_origin_tls, align 4
-; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
-; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
-; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CALLS-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
-; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CALLS-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CALLS-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
+; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CALLS-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4
+; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8
+; CALLS-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4
+; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CALLS-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4
; CALLS-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; CALLS-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
; CALLS-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CALLS-NEXT: [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416
; CALLS-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false)
-; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false)
-; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP23]], i64 16, i1 false)
+; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 40), ptr align 4 [[TMP25]], i64 16, i1 false)
+; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
; CALLS-NEXT: [[TMP26:%.*]] = zext i32 [[TMP13]] to i64
; CALLS-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 32
; CALLS-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]]
-; CALLS-NEXT: store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
-; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CALLS-NEXT: store i64 [[TMP28]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 8), align 8
+; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8
; CALLS-NEXT: [[TMP29:%.*]] = zext i32 [[TMP19]] to i64
; CALLS-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 32
; CALLS-NEXT: [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]]
-; CALLS-NEXT: store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
-; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
+; CALLS-NEXT: store i64 [[TMP31]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 16), align 8
+; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8
; CALLS-NEXT: [[TMP32:%.*]] = zext i32 [[TMP13]] to i64
; CALLS-NEXT: [[TMP33:%.*]] = shl i64 [[TMP32]], 32
; CALLS-NEXT: [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]]
-; CALLS-NEXT: store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
-; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; CALLS-NEXT: store i64 [[TMP34]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 24), align 8
+; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8
; CALLS-NEXT: [[TMP35:%.*]] = zext i32 [[TMP19]] to i64
; CALLS-NEXT: [[TMP36:%.*]] = shl i64 [[TMP35]], 32
; CALLS-NEXT: [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]]
-; CALLS-NEXT: store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8
+; CALLS-NEXT: store i64 [[TMP37]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 32), align 8
; CALLS-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
; CALLS-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080
; CALLS-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
; CALLS-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416
; CALLS-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
-; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false)
-; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 48) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false)
+; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), ptr align 8 [[TMP40]], i64 16, i1 false)
+; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 48), ptr align 8 [[TMP42]], i64 16, i1 false)
; CALLS-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CALLS-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
; CALLS-NEXT: ret void
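
Note on the TLS hunks above: the same change shows up as a constant
expression over the MSan TLS globals, with
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr)
becoming getelementptr (i8, ptr @__msan_param_tls, i64 8). A minimal
stand-alone sketch of the equivalence (the global name here is
illustrative, not one of the real MSan runtime globals):

@example_tls = external thread_local global [100 x i64]

define ptr @slot_old() {
  ; old form: integer arithmetic folded into an inttoptr constant
  ret ptr inttoptr (i64 add (i64 ptrtoint (ptr @example_tls to i64), i64 8) to ptr)
}

define ptr @slot_new() {
  ; new form: a constant byte-offset GEP to the same address
  ret ptr getelementptr (i8, ptr @example_tls, i64 8)
}

Since these CHECK lines are autogenerated, the bulk of this patch is
presumably a regeneration (e.g. via llvm/utils/update_test_checks.py)
after the instrumentation switched to the GEP form.
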
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
index 04fdd23..846912e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
@@ -22,20 +22,20 @@ target triple = "x86_64-unknown-linux-gnu"
define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @Store(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1:![0-9]+]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP6]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], ptr [[P]], i32 zeroext [[TMP3]]), !dbg [[DBG1]]
-; CHECK-NEXT: store i32 [[X:%.*]], ptr [[P]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP6]], align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], ptr [[P]], i32 zeroext [[TMP3]]), !dbg [[DBG2]]
+; CHECK-NEXT: store i32 [[X:%.*]], ptr [[P]], align 4, !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
entry:
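
Note on the msan_debug_info.ll hunks: besides the getelementptr rewrite,
the captured debug-location variables shift by one (DBG1 to DBG2, DBG7 to
DBG8, DBG8 to DBG9), which most likely just reflects renumbered metadata
nodes in the regenerated output rather than a functional change. The
[[...]] tokens are FileCheck capture variables; a minimal illustrative
snippet of the capture/reuse semantics (input lines are invented):

;   input:  %x = load i32, ptr %p, !dbg !7
;   input:  store i32 %x, ptr %q, !dbg !7
; CHECK: load {{.*}} !dbg [[D:![0-9]+]]
; CHECK: store {{.*}} !dbg [[D]]

The first occurrence binds [[D]] to whatever matches ![0-9]+ (here !7);
every later bare [[D]] must match the same text, so the checks stay
immune to absolute metadata numbering as long as uses are consistent.
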
@@ -46,29 +46,29 @@ entry:
define void @LoadAndCmp(ptr nocapture %a) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @LoadAndCmp(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A:%.*]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP5]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP9:%.*]] = xor i32 [[TMP2]], 0, !dbg [[DBG7:![0-9]+]]
-; CHECK-NEXT: [[TMP10:%.*]] = or i32 [[_MSLD]], 0, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], -1, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP13:%.*]] = and i32 [[TMP12]], [[TMP9]], !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[TMP13]], 0, !dbg [[DBG7]]
-; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 [[TMP11]], [[TMP14]], !dbg [[DBG7]]
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP2]], 0, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP15:%.*]] = zext i1 [[_MSPROP_ICMP]] to i8, !dbg [[DBG8:![0-9]+]]
-; CHECK-NEXT: call void @__msan_maybe_warning_1(i8 zeroext [[TMP15]], i32 zeroext [[TMP8]]), !dbg [[DBG8]]
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A:%.*]], align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP5]], align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP9:%.*]] = xor i32 [[TMP2]], 0, !dbg [[DBG8:![0-9]+]]
+; CHECK-NEXT: [[TMP10:%.*]] = or i32 [[_MSLD]], 0, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], -1, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP13:%.*]] = and i32 [[TMP12]], [[TMP9]], !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[TMP13]], 0, !dbg [[DBG8]]
+; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 [[TMP11]], [[TMP14]], !dbg [[DBG8]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP2]], 0, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP15:%.*]] = zext i1 [[_MSPROP_ICMP]] to i8, !dbg [[DBG9:![0-9]+]]
+; CHECK-NEXT: call void @__msan_maybe_warning_1(i8 zeroext [[TMP15]], i32 zeroext [[TMP8]]), !dbg [[DBG9]]
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG9]]
; CHECK: if.then:
; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: tail call void (...) @foo() #[[ATTR5:[0-9]+]]
@@ -92,10 +92,10 @@ declare void @foo(...)
define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
; CHECK-LABEL: @ReturnInt(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: ret i32 123, !dbg [[DBG1]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: ret i32 123, !dbg [[DBG2]]
;
entry:
ret i32 123, !dbg !10
@@ -104,22 +104,22 @@ entry:
define void @CopyRetVal(ptr nocapture %a) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @CopyRetVal(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @ReturnInt() #[[ATTR5]], !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG7]]
-; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG7]]
-; CHECK-NEXT: store i32 [[_MSRET]], ptr [[TMP5]], align 4, !dbg [[DBG7]]
-; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSRET]], ptr [[A]], i32 zeroext [[TMP2]]), !dbg [[DBG7]]
-; CHECK-NEXT: store i32 [[CALL]], ptr [[A]], align 4, !dbg [[DBG7]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @ReturnInt() #[[ATTR5]], !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG8]]
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT: store i32 [[_MSRET]], ptr [[TMP5]], align 4, !dbg [[DBG8]]
+; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSRET]], ptr [[A]], i32 zeroext [[TMP2]]), !dbg [[DBG8]]
+; CHECK-NEXT: store i32 [[CALL]], ptr [[A]], align 4, !dbg [[DBG8]]
; CHECK-NEXT: ret void
;
entry:
@@ -133,32 +133,32 @@ entry:
define void @SExt(ptr nocapture %a, ptr nocapture %b) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @SExt(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[B:%.*]], align 2, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSLD:%.*]] = load i16, ptr [[TMP7]], align 2, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSPROP:%.*]] = sext i16 [[_MSLD]] to i32, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP12:%.*]] = sext i16 [[TMP4]] to i32, !dbg [[DBG7]]
-; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[TMP3]]), !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr, !dbg [[DBG8]]
-; CHECK-NEXT: store i32 [[_MSPROP]], ptr [[TMP15]], align 4, !dbg [[DBG8]]
-; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSPROP]], ptr [[A]], i32 zeroext [[TMP11]]), !dbg [[DBG8]]
-; CHECK-NEXT: store i32 [[TMP12]], ptr [[A]], align 4, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[B:%.*]], align 2, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSLD:%.*]] = load i16, ptr [[TMP7]], align 2, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = sext i16 [[_MSLD]] to i32, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP12:%.*]] = sext i16 [[TMP4]] to i32, !dbg [[DBG8]]
+; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[TMP3]]), !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr, !dbg [[DBG9]]
+; CHECK-NEXT: store i32 [[_MSPROP]], ptr [[TMP15]], align 4, !dbg [[DBG9]]
+; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSPROP]], ptr [[A]], i32 zeroext [[TMP11]]), !dbg [[DBG9]]
+; CHECK-NEXT: store i32 [[TMP12]], ptr [[A]], align 4, !dbg [[DBG9]]
; CHECK-NEXT: ret void
;
entry:
@@ -171,8 +171,8 @@ entry:
define void @MemSet(ptr nocapture %x) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @MemSet(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG1]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
entry:
@@ -187,10 +187,10 @@ declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @MemCpy(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
entry:
@@ -204,8 +204,8 @@ declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounw
define void @MemSetInline(ptr nocapture %x) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @MemSetInline(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG1]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
entry:
@@ -219,10 +219,10 @@ declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind
define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @MemCpyInline(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
entry:
@@ -236,10 +236,10 @@ declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1
define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @MemMove(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
entry:
@@ -256,8 +256,8 @@ declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture write
define void @atomic_memcpy(ptr nocapture %x, ptr nocapture %y) nounwind {
; CHECK-LABEL: @atomic_memcpy(
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG1]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1), !dbg !10
@@ -266,8 +266,8 @@ define void @atomic_memcpy(ptr nocapture %x, ptr nocapture %y) nounwind {
define void @atomic_memmove(ptr nocapture %x, ptr nocapture %y) nounwind {
; CHECK-LABEL: @atomic_memmove(
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG1]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1), !dbg !10
@@ -276,8 +276,8 @@ define void @atomic_memmove(ptr nocapture %x, ptr nocapture %y) nounwind {
define void @atomic_memset(ptr nocapture %x) nounwind {
; CHECK-LABEL: @atomic_memset(
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 [[X:%.*]], i8 88, i64 16, i32 1), !dbg [[DBG1]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 [[X:%.*]], i8 88, i64 16, i32 1), !dbg [[DBG2]]
; CHECK-NEXT: ret void
;
call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %x, i8 88, i64 16, i32 1), !dbg !10
@@ -290,21 +290,21 @@ define void @atomic_memset(ptr nocapture %x) nounwind {
define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
; CHECK-LABEL: @Select(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C:%.*]], i32 [[TMP2]], i32 [[TMP4]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP7:%.*]] = xor i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP2]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP9:%.*]] = or i32 [[TMP8]], [[TMP4]], !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], i32 [[TMP9]], i32 [[TMP6]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]], !dbg [[DBG1]]
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[C]], i32 [[A]], i32 [[B]], !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C:%.*]], i32 [[TMP2]], i32 [[TMP4]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP7:%.*]] = xor i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP2]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP9:%.*]] = or i32 [[TMP8]], [[TMP4]], !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], i32 [[TMP9]], i32 [[TMP6]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]], !dbg [[DBG2]]
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[C]], i32 [[A]], i32 [[B]], !dbg [[DBG2]]
; CHECK-NEXT: store i32 [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: store i32 [[TMP11]], ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: ret i32 [[COND]]
@@ -320,25 +320,25 @@ entry:
define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
; CHECK-LABEL: @SelectVector(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[C:%.*]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A:%.*]], [[B:%.*]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP8:%.*]] = or <8 x i16> [[TMP7]], [[TMP2]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP4]], !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP9]], <8 x i16> [[TMP6]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i1> [[C]] to i8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp ne i8 [[TMP10]], 0, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i1> [[TMP0]] to i8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP11]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP13]], i32 [[TMP1]], i32 [[TMP14]], !dbg [[DBG1]]
-; CHECK-NEXT: [[COND:%.*]] = select <8 x i1> [[C]], <8 x i16> [[A]], <8 x i16> [[B]], !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[C:%.*]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A:%.*]], [[B:%.*]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP8:%.*]] = or <8 x i16> [[TMP7]], [[TMP2]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP4]], !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP9]], <8 x i16> [[TMP6]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i1> [[C]] to i8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ne i8 [[TMP10]], 0, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i1> [[TMP0]] to i8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP11]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP13]], i32 [[TMP1]], i32 [[TMP14]], !dbg [[DBG2]]
+; CHECK-NEXT: [[COND:%.*]] = select <8 x i1> [[C]], <8 x i16> [[A]], <8 x i16> [[B]], !dbg [[DBG2]]
; CHECK-NEXT: store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: store i32 [[TMP15]], ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: ret <8 x i16> [[COND]]
@@ -354,10 +354,10 @@ entry:
define ptr @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
; CHECK-LABEL: @IntToPtr(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[X:%.*]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[X:%.*]] to ptr, !dbg [[DBG2]]
; CHECK-NEXT: store i64 [[TMP0]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: ret ptr [[TMP2]]
@@ -374,13 +374,13 @@ entry:
define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
; CHECK-LABEL: @Div(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]]
-; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG2]]
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG2]]
; CHECK-NEXT: store i32 [[TMP2]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: store i32 [[TMP3]], ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: ret i32 [[DIV]]
@@ -398,24 +398,24 @@ entry:
define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
; CHECK-LABEL: @ShadowLoadAlignmentLarge(
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[Y:%.*]] = alloca i32, align 64, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 64 [[TMP3]], i8 -1, i64 4, i1 false), !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP8:%.*]] = load volatile i32, ptr [[Y]], align 64, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP10]], 17592186044416, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr, !dbg [[DBG7]]
-; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP11]], align 64, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 64, !dbg [[DBG7]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[Y:%.*]] = alloca i32, align 64, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 64 [[TMP3]], i8 -1, i64 4, i1 false), !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP8:%.*]] = load volatile i32, ptr [[Y]], align 64, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 64, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP12]], align 64, !dbg [[DBG8]]
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: store i32 [[TMP14]], ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: ret i32 [[TMP8]]
@@ -429,14 +429,14 @@ define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
; CHECK-LABEL: @ExtractElement(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 [[IDX:%.*]], !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG1]]
-; CHECK-NEXT: [[X:%.*]] = extractelement <4 x i32> [[VEC:%.*]], i32 [[IDX]], !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 [[IDX:%.*]], !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG2]]
+; CHECK-NEXT: [[X:%.*]] = extractelement <4 x i32> [[VEC:%.*]], i32 [[IDX]], !dbg [[DBG2]]
; CHECK-NEXT: store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: store i32 [[TMP4]], ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: ret i32 [[X]]
@@ -448,20 +448,20 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
; CHECK-LABEL: @InsertElement(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX:%.*]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 [[TMP4]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 [[TMP8]], !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG1]]
-; CHECK-NEXT: [[VEC1:%.*]] = insertelement <4 x i32> [[VEC:%.*]], i32 [[X:%.*]], i32 [[IDX]], !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX:%.*]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 [[TMP4]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 [[TMP8]], !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG2]]
+; CHECK-NEXT: [[VEC1:%.*]] = insertelement <4 x i32> [[VEC:%.*]], i32 [[X:%.*]], i32 [[IDX]], !dbg [[DBG2]]
; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: store i32 [[TMP10]], ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: ret <4 x i32> [[VEC1]]
@@ -473,16 +473,16 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memor
define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
; CHECK-LABEL: @ShuffleVector(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i128 [[TMP5]], 0, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP4]], i32 [[TMP2]], !dbg [[DBG1]]
-; CHECK-NEXT: [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>, !dbg [[DBG1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i128 [[TMP5]], 0, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP4]], i32 [[TMP2]], !dbg [[DBG2]]
+; CHECK-NEXT: [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>, !dbg [[DBG2]]
; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: store i32 [[TMP7]], ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: ret <4 x i32> [[VEC2]]
@@ -499,74 +499,70 @@ declare void @llvm.va_start(ptr) nounwind
define void @VAStart(i32 %x, ...) sanitize_memory {
; CHECK-LABEL: @VAStart(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false), !dbg [[DBG1]]
-; CHECK-NEXT: [[SRCSZ:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800), !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[SRCSZ]], i1 false), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[SRCSZ]], i1 false), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP7]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP8]], i8 -1, i64 4, i1 false), !dbg [[DBG1]]
-; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[X_ADDR]], i64 4, ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG1]]
-; CHECK-NEXT: [[VA:%.*]] = alloca [1 x %struct.__va_list_tag], align 16, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP17:%.*]] = and i64 [[TMP16]], -4, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr, !dbg [[DBG7]]
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP15]], i8 -1, i64 24, i1 false), !dbg [[DBG7]]
-; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[VA]], i64 24, ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP21:%.*]] = xor i64 [[TMP20]], 87960930222080, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP23:%.*]] = add i64 [[TMP21]], 17592186044416, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr, !dbg [[DBG8]]
-; CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP22]], align 4, !dbg [[DBG8]]
-; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP4]], ptr [[X_ADDR]], i32 zeroext [[TMP5]]), !dbg [[DBG8]]
-; CHECK-NEXT: store i32 [[X:%.*]], ptr [[X_ADDR]], align 4, !dbg [[DBG8]]
-; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11:![0-9]+]]
-; CHECK-NEXT: [[TMP27:%.*]] = xor i64 [[TMP26]], 87960930222080, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[TMP27]], 17592186044416, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr, !dbg [[DBG11]]
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 24, i1 false), !dbg [[DBG11]]
-; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]]), !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP32:%.*]] = add i64 [[TMP31]], 16, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP35:%.*]] = ptrtoint ptr [[TMP34]] to i64, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP36:%.*]] = xor i64 [[TMP35]], 87960930222080, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP37:%.*]] = inttoptr i64 [[TMP36]] to ptr, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[TMP36]], 17592186044416, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP39:%.*]] = inttoptr i64 [[TMP38]] to ptr, !dbg [[DBG11]]
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP2]], i64 176, i1 false), !dbg [[DBG11]]
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP39]], ptr align 16 [[TMP3]], i64 176, i1 false), !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], 8, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP43:%.*]] = inttoptr i64 [[TMP42]] to ptr, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP43]], align 8, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP44]] to i64, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP46:%.*]] = xor i64 [[TMP45]], 87960930222080, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP47:%.*]] = inttoptr i64 [[TMP46]] to ptr, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP48:%.*]] = add i64 [[TMP46]], 17592186044416, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP49:%.*]] = inttoptr i64 [[TMP48]] to ptr, !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP50:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176, !dbg [[DBG11]]
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP47]], ptr align 16 [[TMP50]], i64 [[TMP0]], i1 false), !dbg [[DBG11]]
-; CHECK-NEXT: [[TMP51:%.*]] = getelementptr i8, ptr [[TMP3]], i32 176, !dbg [[DBG11]]
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP49]], ptr align 16 [[TMP51]], i64 [[TMP0]], i1 false), !dbg [[DBG11]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800), !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[TMP3]], i1 false), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP8]], 17592186044416, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], -4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP9]], i8 -1, i64 4, i1 false), !dbg [[DBG2]]
+; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[X_ADDR]], i64 4, ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG2]]
+; CHECK-NEXT: [[VA:%.*]] = alloca [1 x %struct.__va_list_tag], align 16, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP17:%.*]] = and i64 [[TMP16]], -4, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP15]], i8 -1, i64 24, i1 false), !dbg [[DBG8]]
+; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[VA]], i64 24, ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr, !dbg [[DBG9]]
+; CHECK-NEXT: store i32 [[TMP5]], ptr [[TMP21]], align 4, !dbg [[DBG9]]
+; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP5]], ptr [[X_ADDR]], i32 zeroext [[TMP6]]), !dbg [[DBG9]]
+; CHECK-NEXT: store i32 [[X:%.*]], ptr [[X_ADDR]], align 4, !dbg [[DBG9]]
+; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG10:![0-9]+]]
+; CHECK-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], 87960930222080, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP25]], 17592186044416, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr, !dbg [[DBG10]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP26]], i8 0, i64 24, i1 false), !dbg [[DBG10]]
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]]), !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[VA]], i64 16, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP30:%.*]] = load ptr, ptr [[TMP29]], align 8, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[TMP30]] to i64, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP32:%.*]] = xor i64 [[TMP31]], 87960930222080, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[TMP32]], 17592186044416, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr, !dbg [[DBG10]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP33]], ptr align 16 [[TMP2]], i64 176, i1 false), !dbg [[DBG10]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP35]], ptr align 16 [[TMP4]], i64 176, i1 false), !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[VA]], i64 8, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[TMP37]] to i64, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr, !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176, !dbg [[DBG10]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP40]], ptr align 16 [[TMP43]], i64 [[TMP0]], i1 false), !dbg [[DBG10]]
+; CHECK-NEXT: [[TMP44:%.*]] = getelementptr i8, ptr [[TMP4]], i32 176, !dbg [[DBG10]]
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP42]], ptr align 16 [[TMP44]], i64 [[TMP0]], i1 false), !dbg [[DBG10]]
; CHECK-NEXT: ret void
;
entry:
@@ -582,15 +578,15 @@ entry:
define i32 @NoSanitizeMemory(i32 %x) uwtable {
; CHECK-LABEL: @NoSanitizeMemory(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[X:%.*]], 0, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 -1, [[TMP0]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0, !dbg [[DBG1]]
-; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]], !dbg [[DBG1]]
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[X]], 0, !dbg [[DBG1]]
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG7]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[X:%.*]], 0, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 -1, [[TMP0]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0, !dbg [[DBG2]]
+; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]], !dbg [[DBG2]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[X]], 0, !dbg [[DBG2]]
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG8]]
; CHECK: if.then:
-; CHECK-NEXT: tail call void @bar(), !dbg [[DBG8]]
+; CHECK-NEXT: tail call void @bar(), !dbg [[DBG9]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -615,18 +611,18 @@ declare void @bar()
define i32 @NoSanitizeMemoryAlloca() {
; CHECK-LABEL: @NoSanitizeMemoryAlloca(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[P:%.*]] = alloca i32, align 4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG1]]
-; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false), !dbg [[DBG1]]
-; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG7]]
-; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG7]]
-; CHECK-NEXT: [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(ptr [[P]]), !dbg [[DBG7]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[P:%.*]] = alloca i32, align 4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false), !dbg [[DBG2]]
+; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG8]]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG8]]
+; CHECK-NEXT: [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(ptr [[P]]), !dbg [[DBG8]]
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -647,10 +643,10 @@ declare i32 @NoSanitizeMemoryAllocaHelper(ptr %p)
define i32 @NoSanitizeMemoryUndef() {
; CHECK-LABEL: @NoSanitizeMemoryUndef(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]]
-; CHECK-NEXT: [[X:%.*]] = call i32 @NoSanitizeMemoryUndefHelper(i32 undef), !dbg [[DBG1]]
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG2]]
+; CHECK-NEXT: [[X:%.*]] = call i32 @NoSanitizeMemoryUndefHelper(i32 undef), !dbg [[DBG2]]
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -672,21 +668,21 @@ declare void @foo8(ptr nocapture)
define void @msan() sanitize_memory {
; CHECK-LABEL: @msan(
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT: [[TEXT:%.*]] = alloca i8, align 1, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEXT]]), !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[TEXT]] to i64, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG7]]
-; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG7]]
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP2]], i8 -1, i64 1, i1 false), !dbg [[DBG7]]
-; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[TEXT]], i64 1, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG7]]
-; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG8]]
-; CHECK-NEXT: call void @foo8(ptr [[TEXT]]), !dbg [[DBG8]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TEXT]]), !dbg
-; CHECK-NEXT: ret void, !dbg
+; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]]
+; CHECK-NEXT: [[TEXT:%.*]] = alloca i8, align 1, !dbg [[DBG2]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEXT]]), !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[TEXT]] to i64, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG8]]
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP2]], i8 -1, i64 1, i1 false), !dbg [[DBG8]]
+; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[TEXT]], i64 1, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG8]]
+; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG9]]
+; CHECK-NEXT: call void @foo8(ptr [[TEXT]]), !dbg [[DBG9]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TEXT]]), !dbg [[DBG11:![0-9]+]]
+; CHECK-NEXT: ret void, !dbg [[DBG12:![0-9]+]]
;
entry:
%text = alloca i8, align 1, !dbg !10
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
index 946c95b..13a50c2 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
@@ -36,7 +36,7 @@ define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {
; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1:![0-9]+]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: unreachable
@@ -69,8 +69,8 @@ define void @NormalArg(i32 noundef %a) nounwind uwtable sanitize_memory {
define void @NormalArgAfterNoUndef(i32 noundef %a, i32 %b) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @NormalArgAfterNoUndef(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to ptr
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
@@ -80,7 +80,7 @@ define void @NormalArgAfterNoUndef(i32 noundef %a, i32 %b) nounwind uwtable sani
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP7]], align 4
; CHECK-NEXT: br label [[TMP9]]
@@ -106,7 +106,7 @@ define void @PartialArg(i32 %a) nounwind uwtable sanitize_memory {
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; CHECK: 8:
; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP7]], align 4
; CHECK-NEXT: br label [[TMP9]]
@@ -135,7 +135,7 @@ define void @CallNormalArgAfterNoUndef() nounwind uwtable sanitize_memory {
; CHECK-LABEL: @CallNormalArgAfterNoUndef(
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[R:%.*]] = call i32 @NormalRet() #[[ATTR0]]
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @NormalArgAfterNoUndef(i32 [[R]], i32 [[R]]) #[[ATTR0]]
; CHECK-NEXT: ret void
;
@@ -157,7 +157,7 @@ define void @CallWithLoaded() nounwind uwtable sanitize_memory {
; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR3]]
; CHECK-NEXT: unreachable
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
index 4b7a910..5d63367 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; KMSAN instrumentation tests
; RUN: opt < %s -msan-kernel=1 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK
@@ -6,309 +7,455 @@ target triple = "x86_64-unknown-linux-gnu"
; Check the instrumentation prologue.
define void @Empty() nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define void @Empty(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: ret void
+;
entry:
ret void
}
-; CHECK-LABEL: @Empty
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; %param_shadow:
-; CHECK: getelementptr {{.*}} i32 0, i32 0
-; %retval_shadow:
-; CHECK: getelementptr {{.*}} i32 0, i32 1
-; %va_arg_shadow:
-; CHECK: getelementptr {{.*}} i32 0, i32 2
-; %va_arg_origin:
-; CHECK: getelementptr {{.*}} i32 0, i32 3
-; %va_arg_overflow_size:
-; CHECK: getelementptr {{.*}} i32 0, i32 4
-; %param_origin:
-; CHECK: getelementptr {{.*}} i32 0, i32 5
-; %retval_origin:
-; CHECK: getelementptr {{.*}} i32 0, i32 6
-
; Check instrumentation of stores
-
define void @Store1(ptr nocapture %p, i8 %x) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define void @Store1(
+; CHECK-SAME: ptr captures(none) [[P:%.*]], i8 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[_MSARG1]], align 8
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8:[0-9]+]]
+; CHECK-NEXT: br label %[[BB6]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[P]])
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1
+; CHECK-NEXT: store i8 [[TMP7]], ptr [[TMP14]], align 1
+; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i8 [[TMP7]], 0
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB12:.*]], !prof [[PROF1]]
+; CHECK: [[BB10]]:
+; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]])
+; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP15]], align 4
+; CHECK-NEXT: br label %[[BB12]]
+; CHECK: [[BB12]]:
+; CHECK-NEXT: store i8 [[X]], ptr [[P]], align 1
+; CHECK-NEXT: ret void
+;
entry:
store i8 %x, ptr %p
ret void
}
-; CHECK-LABEL: @Store1
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: [[BASE:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
-; CHECK: [[SHADOW_PTR:%[a-z0-9_]+]] = inttoptr {{.*}} [[BASE]]
-; CHECK: [[SHADOW:%[a-z0-9]+]] = load i64, ptr [[SHADOW_PTR]]
-; CHECK: [[BASE2:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: icmp ne i64 [[SHADOW]]
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_metadata_ptr_for_store_1(ptr %p)
-; CHECK: store i8
-; If the new shadow is non-zero, jump to __msan_chain_origin()
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_chain_origin
-; Storing origin here:
-; CHECK: store i32
-; CHECK: br label
-; CHECK: {{^[0-9]+}}:
-; CHECK: store i8
-; CHECK: ret void
-
define void @Store2(ptr nocapture %p, i16 %x) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define void @Store2(
+; CHECK-SAME: ptr captures(none) [[P:%.*]], i16 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[_MSARG1]], align 8
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB6]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_2(ptr [[P]])
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1
+; CHECK-NEXT: store i16 [[TMP7]], ptr [[TMP14]], align 2
+; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i16 [[TMP7]], 0
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB12:.*]], !prof [[PROF1]]
+; CHECK: [[BB10]]:
+; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]])
+; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP15]], align 4
+; CHECK-NEXT: br label %[[BB12]]
+; CHECK: [[BB12]]:
+; CHECK-NEXT: store i16 [[X]], ptr [[P]], align 2
+; CHECK-NEXT: ret void
+;
entry:
store i16 %x, ptr %p
ret void
}
-; CHECK-LABEL: @Store2
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i64
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_metadata_ptr_for_store_2(ptr %p)
-; CHECK: store i16
-; If the new shadow is non-zero, jump to __msan_chain_origin()
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_chain_origin
-; Storing origin here:
-; CHECK: store i32
-; CHECK: br label
-; CHECK: {{^[0-9]+}}:
-; CHECK: store i16
-; CHECK: ret void
-
-
define void @Store4(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define void @Store4(
+; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[_MSARG1]], align 8
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB6]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr [[P]])
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1
+; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP14]], align 4
+; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i32 [[TMP7]], 0
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB12:.*]], !prof [[PROF1]]
+; CHECK: [[BB10]]:
+; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]])
+; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP15]], align 4
+; CHECK-NEXT: br label %[[BB12]]
+; CHECK: [[BB12]]:
+; CHECK-NEXT: store i32 [[X]], ptr [[P]], align 4
+; CHECK-NEXT: ret void
+;
entry:
store i32 %x, ptr %p
ret void
}
-; CHECK-LABEL: @Store4
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i32
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_metadata_ptr_for_store_4(ptr %p)
-; CHECK: store i32
-; If the new shadow is non-zero, jump to __msan_chain_origin()
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_chain_origin
-; Storing origin here:
-; CHECK: store i32
-; CHECK: br label
-; CHECK: {{^[0-9]+}}:
-; CHECK: store i32
-; CHECK: ret void
-
define void @Store8(ptr nocapture %p, i64 %x) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define void @Store8(
+; CHECK-SAME: ptr captures(none) [[P:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[_MSARG1]], align 8
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB6]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr [[P]])
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1
+; CHECK-NEXT: store i64 [[TMP7]], ptr [[TMP14]], align 8
+; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB15:.*]], !prof [[PROF1]]
+; CHECK: [[BB10]]:
+; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]])
+; CHECK-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
+; CHECK-NEXT: [[TMP19:%.*]] = shl i64 [[TMP18]], 32
+; CHECK-NEXT: [[TMP20:%.*]] = or i64 [[TMP18]], [[TMP19]]
+; CHECK-NEXT: store i64 [[TMP20]], ptr [[TMP15]], align 8
+; CHECK-NEXT: br label %[[BB15]]
+; CHECK: [[BB15]]:
+; CHECK-NEXT: store i64 [[X]], ptr [[P]], align 8
+; CHECK-NEXT: ret void
+;
entry:
store i64 %x, ptr %p
ret void
}
-; CHECK-LABEL: @Store8
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i64
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_metadata_ptr_for_store_8(ptr %p)
-; CHECK: store i64
-; If the new shadow is non-zero, jump to __msan_chain_origin()
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_chain_origin
-; Storing origin here:
-; CHECK: store i64
-; CHECK: br label
-; CHECK: {{^[0-9]+}}:
-; CHECK: store i64
-; CHECK: ret void
-
define void @Store16(ptr nocapture %p, i128 %x) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define void @Store16(
+; CHECK-SAME: ptr captures(none) [[P:%.*]], i128 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i128, ptr [[_MSARG1]], align 8
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB6]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_n(ptr [[P]], i64 16)
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1
+; CHECK-NEXT: store i128 [[TMP7]], ptr [[TMP14]], align 8
+; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i128 [[TMP7]], 0
+; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB16:.*]], !prof [[PROF1]]
+; CHECK: [[BB10]]:
+; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]])
+; CHECK-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
+; CHECK-NEXT: [[TMP19:%.*]] = shl i64 [[TMP18]], 32
+; CHECK-NEXT: [[TMP20:%.*]] = or i64 [[TMP18]], [[TMP19]]
+; CHECK-NEXT: store i64 [[TMP20]], ptr [[TMP15]], align 8
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[TMP15]], i32 1
+; CHECK-NEXT: store i64 [[TMP20]], ptr [[TMP21]], align 8
+; CHECK-NEXT: br label %[[BB16]]
+; CHECK: [[BB16]]:
+; CHECK-NEXT: store i128 [[X]], ptr [[P]], align 8
+; CHECK-NEXT: ret void
+;
entry:
store i128 %x, ptr %p
ret void
}
-; CHECK-LABEL: @Store16
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i64
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_metadata_ptr_for_store_n(ptr %p, i64 16)
-; CHECK: store i128
-; If the new shadow is non-zero, jump to __msan_chain_origin()
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_chain_origin
-; Storing origin here:
-; CHECK: store i64
-; CHECK: br label
-; CHECK: {{^[0-9]+}}:
-; CHECK: store i128
-; CHECK: ret void
-
-
; Check instrumentation of loads
define i8 @Load1(ptr nocapture %p) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define i8 @Load1(
+; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK: [[BB3]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB4]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[P]], align 1
+; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_1(ptr [[P]])
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1
+; CHECK-NEXT: [[_MSLD:%.*]] = load i8, ptr [[TMP9]], align 1
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT: store i8 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4
+; CHECK-NEXT: ret i8 [[TMP7]]
+;
entry:
%0 = load i8, ptr %p
ret i8 %0
}
-; CHECK-LABEL: @Load1
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i64
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; Load the value from %p. This is done before accessing the shadow
-; to ease atomic handling.
-; CHECK: load i8
-; CHECK: @__msan_metadata_ptr_for_load_1(ptr %p)
-; Load the shadow and origin.
-; CHECK: load i8
-; CHECK: load i32
-
-
define i16 @Load2(ptr nocapture %p) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define i16 @Load2(
+; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK: [[BB3]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB4]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[P]], align 2
+; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_2(ptr [[P]])
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1
+; CHECK-NEXT: [[_MSLD:%.*]] = load i16, ptr [[TMP9]], align 2
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT: store i16 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4
+; CHECK-NEXT: ret i16 [[TMP7]]
+;
entry:
%0 = load i16, ptr %p
ret i16 %0
}
-; CHECK-LABEL: @Load2
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i64
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; Load the value from %p. This is done before accessing the shadow
-; to ease atomic handling.
-; CHECK: load i16
-; CHECK: @__msan_metadata_ptr_for_load_2(ptr %p)
-; Load the shadow and origin.
-; CHECK: load i16
-; CHECK: load i32
-
-
define i32 @Load4(ptr nocapture %p) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define i32 @Load4(
+; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK: [[BB3]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB4]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_4(ptr [[P]])
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1
+; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP9]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT: store i32 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4
+; CHECK-NEXT: ret i32 [[TMP7]]
+;
entry:
%0 = load i32, ptr %p
ret i32 %0
}
-; CHECK-LABEL: @Load4
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i64
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; Load the value from %p. This is done before accessing the shadow
-; to ease atomic handling.
-; CHECK: load i32
-; CHECK: @__msan_metadata_ptr_for_load_4(ptr %p)
-; Load the shadow and origin.
-; CHECK: load i32
-; CHECK: load i32
-
define i64 @Load8(ptr nocapture %p) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define i64 @Load8(
+; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK: [[BB3]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB4]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[P]], align 8
+; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_8(ptr [[P]])
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1
+; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4
+; CHECK-NEXT: ret i64 [[TMP7]]
+;
entry:
%0 = load i64, ptr %p
ret i64 %0
}
-; CHECK-LABEL: @Load8
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i64
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; Load the value from %p. This is done before accessing the shadow
-; to ease atomic handling.
-; CHECK: load i64
-; CHECK: @__msan_metadata_ptr_for_load_8(ptr %p)
-; Load the shadow and origin.
-; CHECK: load i64
-; CHECK: load i32
-
define i128 @Load16(ptr nocapture %p) nounwind uwtable sanitize_memory {
+; CHECK-LABEL: define i128 @Load16(
+; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK: [[BB3]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]]
+; CHECK-NEXT: br label %[[BB4]]
+; CHECK: [[BB4]]:
+; CHECK-NEXT: [[TMP7:%.*]] = load i128, ptr [[P]], align 8
+; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_n(ptr [[P]], i64 16)
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1
+; CHECK-NEXT: [[_MSLD:%.*]] = load i128, ptr [[TMP9]], align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 8
+; CHECK-NEXT: store i128 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4
+; CHECK-NEXT: ret i128 [[TMP7]]
+;
entry:
%0 = load i128, ptr %p
ret i128 %0
}
-; CHECK-LABEL: @Load16
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
-; Load the shadow of %p and check it
-; CHECK: load i64
-; CHECK: icmp
-; CHECK: br i1
-; CHECK: {{^[0-9]+}}:
-; Load the value from %p. This is done before accessing the shadow
-; to ease atomic handling.
-; CHECK: load i128
-; CHECK: @__msan_metadata_ptr_for_load_n(ptr %p, i64 16)
-; Load the shadow and origin.
-; CHECK: load i128
-; CHECK: load i32
-
-
; Test kernel-specific va_list instrumentation
%struct.__va_list_tag = type { i32, i32, ptr, ptr }
@@ -319,6 +466,68 @@ declare dso_local i32 @VAListFn(ptr, ptr) local_unnamed_addr
; Function Attrs: nounwind uwtable
define dso_local i32 @VarArgFn(ptr %fmt, ...) local_unnamed_addr sanitize_memory #0 {
+; CHECK-LABEL: define dso_local i32 @VarArgFn(
+; CHECK-SAME: ptr [[FMT:%.*]], ...) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 48, [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = alloca i8, i64 [[TMP6]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 [[TMP6]], i1 false)
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP6]], i64 800)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP7]], ptr align 8 [[VA_ARG_SHADOW]], i64 [[TMP8]], i1 false)
+; CHECK-NEXT: [[TMP9:%.*]] = alloca i8, i64 [[TMP6]], align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[VA_ARG_ORIGIN]], i64 [[TMP8]], i1 false)
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT: call void @__msan_poison_alloca(ptr [[ARGS]], i64 24, ptr @[[GLOB0:[0-9]+]])
+; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[ARGS]])
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 1
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP11]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16
+; CHECK-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8
+; CHECK-NEXT: [[TMP17:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[TMP16]])
+; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { ptr, ptr } [[TMP17]], 0
+; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { ptr, ptr } [[TMP17]], 1
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP18]], ptr align 16 [[TMP7]], i64 48, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP19]], ptr align 16 [[TMP9]], i64 48, i1 false)
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8
+; CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8
+; CHECK-NEXT: [[TMP24:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[TMP23]])
+; CHECK-NEXT: [[TMP25:%.*]] = extractvalue { ptr, ptr } [[TMP24]], 0
+; CHECK-NEXT: [[TMP26:%.*]] = extractvalue { ptr, ptr } [[TMP24]], 1
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP7]], i32 48
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP25]], ptr align 16 [[TMP27]], i64 [[TMP5]], i1 false)
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP9]], i32 48
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP26]], ptr align 16 [[TMP28]], i64 [[TMP5]], i1 false)
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: store i64 [[TMP2]], ptr [[_MSARG1]], align 8
+; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0
+; CHECK-NEXT: store i32 [[TMP4]], ptr [[_MSARG_O2]], align 4
+; CHECK-NEXT: [[_MSARG3:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8
+; CHECK-NEXT: store i64 0, ptr [[_MSARG3]], align 8
+; CHECK-NEXT: store i32 0, ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @VAListFn(ptr [[FMT]], ptr nonnull [[ARGS]])
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[RETVAL_ORIGIN]], align 4
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
+; CHECK-NEXT: store i32 [[_MSRET]], ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: store i32 [[TMP33]], ptr [[RETVAL_ORIGIN]], align 4
+; CHECK-NEXT: ret i32 [[CALL]]
+;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
call void @llvm.va_start(ptr nonnull %args)
@@ -330,52 +539,45 @@ entry:
; Kernel is built without SSE support.
attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
-; CHECK-LABEL: @VarArgFn
-; CHECK: @__msan_get_context_state()
-; CHECK: [[VA_ARG_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 2
-; CHECK: [[VA_ARG_ORIGIN:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 3
-; CHECK: [[VA_ARG_OVERFLOW_SIZE:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 4
-; CHECK: [[OSIZE:%[0-9]+]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]]
; Register save area is 48 bytes for non-SSE builds.
-; CHECK: [[SIZE:%[0-9]+]] = add i64 48, [[OSIZE]]
-; CHECK: [[SHADOWS:%[0-9]+]] = alloca i8, i64 [[SIZE]]
-; CHECK: call void @llvm.memset{{.*}}(ptr align 8 [[SHADOWS]], i8 0, i64 [[SIZE]], i1 false)
-; CHECK: [[COPYSZ:%[0-9]+]] = call i64 @llvm.umin.i64(i64 [[SIZE]], i64 800)
-; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[SHADOWS]], ptr align 8 [[VA_ARG_SHADOW]], i64 [[COPYSZ]]
-; CHECK: [[ORIGINS:%[0-9]+]] = alloca i8, i64 [[SIZE]]
-; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[ORIGINS]], ptr align 8 [[VA_ARG_ORIGIN]], i64 [[COPYSZ]]
-; CHECK: call i32 @VAListFn
; Function Attrs: nounwind uwtable
define dso_local void @VarArgCaller() local_unnamed_addr sanitize_memory {
+; CHECK-LABEL: define dso_local void @VarArgCaller(
+; CHECK-SAME: ) local_unnamed_addr #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
+; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
+; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2
+; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3
+; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
+; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
+; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0
+; CHECK-NEXT: store i64 0, ptr [[_MSARG]], align 8
+; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8
+; CHECK-NEXT: store i32 0, ptr [[_MSARG1]], align 8
+; CHECK-NEXT: [[_MSARG_VA_S:%.*]] = getelementptr i8, ptr [[VA_ARG_SHADOW]], i64 0
+; CHECK-NEXT: [[_MSARG_VA_O:%.*]] = getelementptr i8, ptr [[VA_ARG_ORIGIN]], i64 0
+; CHECK-NEXT: [[_MSARG_VA_S2:%.*]] = getelementptr i8, ptr [[VA_ARG_SHADOW]], i64 8
+; CHECK-NEXT: [[_MSARG_VA_O3:%.*]] = getelementptr i8, ptr [[VA_ARG_ORIGIN]], i64 8
+; CHECK-NEXT: store i32 0, ptr [[_MSARG_VA_S2]], align 8
+; CHECK-NEXT: store i32 0, ptr [[_MSARG_VA_O3]], align 8
+; CHECK-NEXT: store i64 0, ptr [[VA_ARG_OVERFLOW_SIZE]], align 8
+; CHECK-NEXT: store i32 0, ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (ptr, ...) @VarArgFn(ptr @.str, i32 123)
+; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr [[RETVAL_SHADOW]], align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL_ORIGIN]], align 4
+; CHECK-NEXT: ret void
+;
entry:
%call = tail call i32 (ptr, ...) @VarArgFn(ptr @.str, i32 123)
ret void
}
-; CHECK-LABEL: @VarArgCaller
-
-; CHECK: entry:
-; CHECK: @__msan_get_context_state()
-; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
-; CHECK: [[VA_ARG_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 2
-; CHECK: [[VA_ARG_OVERFLOW_SIZE:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 4
-
-; CHECK: [[PARAM_SI:%[_a-z0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
-; CHECK: [[ARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[PARAM_SI]] to ptr
-; First argument is initialized
-; CHECK: store i64 0, ptr [[ARG1_S]]
-
-; Dangling cast of va_arg_shadow[0], unused because the first argument is fixed.
-; CHECK: [[VA_CAST0:%[_a-z0-9]+]] = ptrtoint {{.*}} [[VA_ARG_SHADOW]] to i64
-
-; CHECK: [[VA_CAST1:%[_a-z0-9]+]] = ptrtoint {{.*}} [[VA_ARG_SHADOW]] to i64
-; CHECK: [[ARG1_SI:%[_a-z0-9]+]] = add i64 [[VA_CAST1]], 8
-; CHECK: [[PARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[ARG1_SI]] to ptr
-
-; Shadow for 123 is 0.
-; CHECK: store i32 0, ptr [[ARG1_S]]
-
-; CHECK: store i64 0, ptr [[VA_ARG_OVERFLOW_SIZE]]
-; CHECK: call i32 (ptr, ...) @VarArgFn({{.*}} @.str{{.*}} i32 123)
+;.
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
diff --git a/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll b/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll
index 24276a2..e883416 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll
@@ -8,7 +8,7 @@ define void @test_memcpy(ptr %p, ptr byval(i32) %p2) sanitize_memory {
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P2:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP3]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 4, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP3]], ptr align 4 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 4, i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call ptr @__msan_memcpy(ptr [[P:%.*]], ptr [[P2]], i64 4)
; CHECK-NEXT: ret void
@@ -22,7 +22,7 @@ define void @test_memmove(ptr %p, ptr byval(i32) %p2) sanitize_memory {
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P2:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP3]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 4, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP3]], ptr align 4 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 4, i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = call ptr @__msan_memmove(ptr [[P:%.*]], ptr [[P2]], i64 4)
; CHECK-NEXT: ret void
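The opaque-ptr.ll hunks also exercise byval arguments: before calling the runtime helper, the instrumentation populates the byval copy's shadow with a memcpy from the caller's parameter TLS slot. The destination shadow address comes from the userspace x86-64 Linux mapping, shadow = addr xor 0x500000000000 (the 87960930222080 constant above). A minimal sketch (illustrative):

  %a  = ptrtoint ptr %p2 to i64
  %sa = xor i64 %a, 87960930222080        ; 0x500000000000: app address -> shadow address
  %sp = inttoptr i64 %sa to ptr
  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %sp, ptr align 4 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 4, i1 false)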
diff --git a/llvm/test/Instrumentation/MemorySanitizer/or.ll b/llvm/test/Instrumentation/MemorySanitizer/or.ll
index 20993a5..ce33022 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/or.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/or.ll
@@ -11,7 +11,7 @@ define i8 @test_or(i8 %a, i8 %b) sanitize_memory {
; CHECK-LABEL: define i8 @test_or(
; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = xor i8 [[A]], -1
; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[B]], -1
@@ -32,7 +32,7 @@ define i8 @test_disjoint_or(i8 %a, i8 %b) sanitize_memory {
; CHECK-IMPRECISE-LABEL: define i8 @test_disjoint_or(
; CHECK-IMPRECISE-SAME: i8 [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
; CHECK-IMPRECISE-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-IMPRECISE-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-IMPRECISE-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-IMPRECISE-NEXT: call void @llvm.donothing()
; CHECK-IMPRECISE-NEXT: [[TMP3:%.*]] = xor i8 [[A]], -1
; CHECK-IMPRECISE-NEXT: [[TMP4:%.*]] = xor i8 [[B]], -1
@@ -48,7 +48,7 @@ define i8 @test_disjoint_or(i8 %a, i8 %b) sanitize_memory {
; CHECK-PRECISE-LABEL: define i8 @test_disjoint_or(
; CHECK-PRECISE-SAME: i8 [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
; CHECK-PRECISE-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-PRECISE-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-PRECISE-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-PRECISE-NEXT: call void @llvm.donothing()
; CHECK-PRECISE-NEXT: [[TMP3:%.*]] = xor i8 [[A]], -1
; CHECK-PRECISE-NEXT: [[TMP4:%.*]] = xor i8 [[B]], -1
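The or.ll checks encode MSan's bit-precise rule for or: a defined 1 in either operand makes the result bit defined no matter what the other operand holds, so poison only survives where it cannot be masked. With values V1, V2 and shadows S1, S2 (shadow bit 1 = poisoned), the instrumentation computes, as the two negations visible above begin to show:

  ; S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
  ; e.g. a fully defined all-ones V1 (S1 = 0, ~V1 = 0) yields S = 0:
  ; 1 | x is 1 regardless of x, so poison in x cannot reach the result.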
diff --git a/llvm/test/Instrumentation/MemorySanitizer/overflow.ll b/llvm/test/Instrumentation/MemorySanitizer/overflow.ll
index 0cfae00..9c9efcb 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/overflow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/overflow.ll
@@ -8,7 +8,7 @@ define {i64, i1} @test_sadd_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_sadd_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
@@ -26,7 +26,7 @@ define {i64, i1} @test_uadd_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_uadd_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
@@ -44,7 +44,7 @@ define {i64, i1} @test_smul_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_smul_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
@@ -61,7 +61,7 @@ define {i64, i1} @test_umul_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_umul_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
@@ -78,7 +78,7 @@ define {i64, i1} @test_ssub_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_ssub_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
@@ -95,7 +95,7 @@ define {i64, i1} @test_usub_with_overflow(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define { i64, i1 } @test_usub_with_overflow(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
@@ -113,7 +113,7 @@ define {<4 x i32>, <4 x i1>} @test_sadd_with_overflow_vec(<4 x i32> %a, <4 x i32
; CHECK-LABEL: define { <4 x i32>, <4 x i1> } @test_sadd_with_overflow_vec(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
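For the *.with.overflow intrinsics there is no bitwise shadow propagation; as the checks above show, the operand shadows are collapsed instead: poison anywhere in either input makes both the result value and the overflow flag unknowable. Roughly (illustrative; the widening step lies beyond this excerpt):

  %s   = or i64 %s_a, %s_b       ; merge operand shadows
  %any = icmp ne i64 %s, 0       ; collapse to a single poison bit
  ; %any is then broadcast into the shadow of the { i64, i1 } result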
diff --git a/llvm/test/Instrumentation/MemorySanitizer/pr32842.ll b/llvm/test/Instrumentation/MemorySanitizer/pr32842.ll
index 6d275b3..87ff4e6 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/pr32842.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/pr32842.ll
@@ -13,7 +13,7 @@ define zeroext i1 @_Z1fii(i32 %x, i32 %y) sanitize_memory {
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP10:%.*]] = xor i32 [[X]], -2147483648
; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP0]], -1
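pr32842.ll guards the exact shadow propagation for signed relational compares. The xor with -2147483648 above flips the sign bit, mapping signed order onto unsigned order; the xor with -1 negates the shadow so the instrumentation can bound each operand by its best and worst possible values. A sketch of the idea (illustrative, with hypothetical names):

  %xu = xor i32 %x, -2147483648     ; signed order -> unsigned order
  %a0 = and i32 %xu, %not_shadow    ; smallest value %x could take
  %a1 = or  i32 %xu, %shadow        ; largest value %x could take
  ; (A cmp B) is fully defined iff (a0 cmp b1) == (a1 cmp b0)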
diff --git a/llvm/test/Instrumentation/MemorySanitizer/saturating.ll b/llvm/test/Instrumentation/MemorySanitizer/saturating.ll
index dcd8a08..9473523 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/saturating.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/saturating.ll
@@ -8,7 +8,7 @@ define i64 @test_sadd_sat(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define i64 @test_sadd_sat(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A]], i64 [[B]])
@@ -23,7 +23,7 @@ define i64 @test_uadd_sat(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define i64 @test_uadd_sat(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A]], i64 [[B]])
@@ -38,7 +38,7 @@ define i64 @test_ssub_sat(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define i64 @test_ssub_sat(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A]], i64 [[B]])
@@ -53,7 +53,7 @@ define i64 @test_usub_sat(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define i64 @test_usub_sat(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A]], i64 [[B]])
@@ -68,7 +68,7 @@ define i64 @test_sshl_sat(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define i64 @test_sshl_sat(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.sshl.sat.i64(i64 [[A]], i64 [[B]])
@@ -83,7 +83,7 @@ define i64 @test_ushl_sat(i64 %a, i64 %b) #0 {
; CHECK-LABEL: define i64 @test_ushl_sat(
; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.ushl.sat.i64(i64 [[A]], i64 [[B]])
@@ -98,7 +98,7 @@ define <4 x i32> @test_sadd_sat_vec(<4 x i32> %a, <4 x i32> %b) #0 {
; CHECK-LABEL: define <4 x i32> @test_sadd_sat_vec(
; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> [[A]], <4 x i32> [[B]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/scmp.ll b/llvm/test/Instrumentation/MemorySanitizer/scmp.ll
index 5c94c21..0d4799f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/scmp.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/scmp.ll
@@ -10,7 +10,7 @@ define i8 @scmp.8.8(i8 %x, i8 %y) nounwind #0 {
; CHECK-LABEL: define i8 @scmp.8.8(
; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i8 [[_MSPROP]], 0
@@ -26,7 +26,7 @@ define i8 @scmp.8.16(i16 %x, i16 %y) nounwind #0 {
; CHECK-LABEL: define i8 @scmp.8.16(
; CHECK-SAME: i16 [[X:%.*]], i16 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i16 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i16 [[_MSPROP]], 0
@@ -43,7 +43,7 @@ define i8 @scmp.8.32(i32 %x, i32 %y) nounwind #0 {
; CHECK-LABEL: define i8 @scmp.8.32(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -60,7 +60,7 @@ define i8 @scmp.8.64(i64 %x, i64 %y) nounwind #0 {
; CHECK-LABEL: define i8 @scmp.8.64(
; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
@@ -77,7 +77,7 @@ define i8 @scmp.8.128(i128 %x, i128 %y) nounwind #0 {
; CHECK-LABEL: define i8 @scmp.8.128(
; CHECK-SAME: i128 [[X:%.*]], i128 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i128 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i128 [[_MSPROP]], 0
@@ -94,7 +94,7 @@ define i32 @scmp.32.32(i32 %x, i32 %y) nounwind #0 {
; CHECK-LABEL: define i32 @scmp.32.32(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -110,7 +110,7 @@ define i32 @scmp.32.64(i64 %x, i64 %y) nounwind #0 {
; CHECK-LABEL: define i32 @scmp.32.64(
; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
@@ -127,7 +127,7 @@ define i64 @scmp.64.64(i64 %x, i64 %y) nounwind #0 {
; CHECK-LABEL: define i64 @scmp.64.64(
; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
@@ -143,7 +143,7 @@ define i4 @scmp_narrow_result(i32 %x, i32 %y) nounwind #0 {
; CHECK-LABEL: define i4 @scmp_narrow_result(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -160,7 +160,7 @@ define i8 @scmp_narrow_op(i62 %x, i62 %y) nounwind #0 {
; CHECK-LABEL: define i8 @scmp_narrow_op(
; CHECK-SAME: i62 [[X:%.*]], i62 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i62, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i62, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i62, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i62 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i62 [[_MSPROP]], 0
@@ -177,7 +177,7 @@ define i141 @scmp_wide_result(i32 %x, i32 %y) nounwind #0 {
; CHECK-LABEL: define i141 @scmp_wide_result(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -194,7 +194,7 @@ define i8 @scmp_wide_op(i109 %x, i109 %y) nounwind #0 {
; CHECK-LABEL: define i8 @scmp_wide_op(
; CHECK-SAME: i109 [[X:%.*]], i109 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i109, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i109, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i109, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i109 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i109 [[_MSPROP]], 0
@@ -211,7 +211,7 @@ define i41 @scmp_uncommon_types(i7 %x, i7 %y) nounwind #0 {
; CHECK-LABEL: define i41 @scmp_uncommon_types(
; CHECK-SAME: i7 [[X:%.*]], i7 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i7, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i7, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i7, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i7 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i7 [[_MSPROP]], 0
@@ -228,7 +228,7 @@ define <4 x i32> @scmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @scmp_normal_vectors(
; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer
@@ -244,7 +244,7 @@ define <4 x i8> @scmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind #0
; CHECK-LABEL: define <4 x i8> @scmp_narrow_vec_result(
; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer
@@ -261,7 +261,7 @@ define <4 x i32> @scmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @scmp_narrow_vec_op(
; CHECK-SAME: <4 x i8> [[X:%.*]], <4 x i8> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i8> [[_MSPROP]], zeroinitializer
@@ -278,7 +278,7 @@ define <16 x i32> @scmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind #0
; CHECK-LABEL: define <16 x i32> @scmp_wide_vec_result(
; CHECK-SAME: <16 x i8> [[X:%.*]], <16 x i8> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i8> [[_MSPROP]], zeroinitializer
@@ -295,7 +295,7 @@ define <16 x i8> @scmp_wide_vec_op(<16 x i64> %x, <16 x i64> %y) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @scmp_wide_vec_op(
; CHECK-SAME: <16 x i64> [[X:%.*]], <16 x i64> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i64> [[_MSPROP]], zeroinitializer
@@ -312,7 +312,7 @@ define <7 x i117> @scmp_uncommon_vectors(<7 x i7> %x, <7 x i7> %y) nounwind #0 {
; CHECK-LABEL: define <7 x i117> @scmp_uncommon_vectors(
; CHECK-SAME: <7 x i7> [[X:%.*]], <7 x i7> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <7 x i7>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <7 x i7>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <7 x i7>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <7 x i7> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <7 x i7> [[_MSPROP]], zeroinitializer
@@ -329,7 +329,7 @@ define <1 x i3> @scmp_scalarize(<1 x i33> %x, <1 x i33> %y) nounwind #0 {
; CHECK-LABEL: define <1 x i3> @scmp_scalarize(
; CHECK-SAME: <1 x i33> [[X:%.*]], <1 x i33> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i33>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i33>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i33>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <1 x i33> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <1 x i33> [[_MSPROP]], zeroinitializer
@@ -346,7 +346,7 @@ define <2 x i8> @scmp_bool_operands(<2 x i1> %x, <2 x i1> %y) nounwind #0 {
; CHECK-LABEL: define <2 x i8> @scmp_bool_operands(
; CHECK-SAME: <2 x i1> [[X:%.*]], <2 x i1> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i1> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i1> [[_MSPROP]], zeroinitializer
@@ -363,7 +363,7 @@ define <2 x i16> @scmp_ret_wider_than_operands(<2 x i8> %x, <2 x i8> %y) nounwin
; CHECK-LABEL: define <2 x i16> @scmp_ret_wider_than_operands(
; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i8> [[_MSPROP]], zeroinitializer
diff --git a/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll b/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll
index 1b70242..3c9d6d8 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll
@@ -10,7 +10,7 @@ define i8 @ucmp.8.8(i8 %x, i8 %y) nounwind #0 {
; CHECK-LABEL: define i8 @ucmp.8.8(
; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i8 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i8 [[_MSPROP]], 0
@@ -26,7 +26,7 @@ define i8 @ucmp.8.16(i16 %x, i16 %y) nounwind #0 {
; CHECK-LABEL: define i8 @ucmp.8.16(
; CHECK-SAME: i16 [[X:%.*]], i16 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i16 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i16 [[_MSPROP]], 0
@@ -43,7 +43,7 @@ define i8 @ucmp.8.32(i32 %x, i32 %y) nounwind #0 {
; CHECK-LABEL: define i8 @ucmp.8.32(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -60,7 +60,7 @@ define i8 @ucmp.8.64(i64 %x, i64 %y) nounwind #0 {
; CHECK-LABEL: define i8 @ucmp.8.64(
; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
@@ -77,7 +77,7 @@ define i8 @ucmp.8.128(i128 %x, i128 %y) nounwind #0 {
; CHECK-LABEL: define i8 @ucmp.8.128(
; CHECK-SAME: i128 [[X:%.*]], i128 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i128 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i128 [[_MSPROP]], 0
@@ -94,7 +94,7 @@ define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind #0 {
; CHECK-LABEL: define i32 @ucmp.32.32(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -110,7 +110,7 @@ define i32 @ucmp.32.64(i64 %x, i64 %y) nounwind #0 {
; CHECK-LABEL: define i32 @ucmp.32.64(
; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
@@ -127,7 +127,7 @@ define i64 @ucmp.64.64(i64 %x, i64 %y) nounwind #0 {
; CHECK-LABEL: define i64 @ucmp.64.64(
; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
@@ -143,7 +143,7 @@ define i4 @ucmp_narrow_result(i32 %x, i32 %y) nounwind #0 {
; CHECK-LABEL: define i4 @ucmp_narrow_result(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -160,7 +160,7 @@ define i8 @ucmp_narrow_op(i62 %x, i62 %y) nounwind #0 {
; CHECK-LABEL: define i8 @ucmp_narrow_op(
; CHECK-SAME: i62 [[X:%.*]], i62 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i62, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i62, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i62, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i62 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i62 [[_MSPROP]], 0
@@ -177,7 +177,7 @@ define i141 @ucmp_wide_result(i32 %x, i32 %y) nounwind #0 {
; CHECK-LABEL: define i141 @ucmp_wide_result(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
@@ -194,7 +194,7 @@ define i8 @ucmp_wide_op(i109 %x, i109 %y) nounwind #0 {
; CHECK-LABEL: define i8 @ucmp_wide_op(
; CHECK-SAME: i109 [[X:%.*]], i109 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i109, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i109, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i109, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i109 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i109 [[_MSPROP]], 0
@@ -211,7 +211,7 @@ define i41 @ucmp_uncommon_types(i7 %x, i7 %y) nounwind #0 {
; CHECK-LABEL: define i41 @ucmp_uncommon_types(
; CHECK-SAME: i7 [[X:%.*]], i7 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i7, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i7, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i7, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or i7 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or i7 [[_MSPROP]], 0
@@ -228,7 +228,7 @@ define <4 x i32> @ucmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @ucmp_normal_vectors(
; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer
@@ -244,7 +244,7 @@ define <4 x i8> @ucmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind #0
; CHECK-LABEL: define <4 x i8> @ucmp_narrow_vec_result(
; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer
@@ -261,7 +261,7 @@ define <4 x i32> @ucmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind #0 {
; CHECK-LABEL: define <4 x i32> @ucmp_narrow_vec_op(
; CHECK-SAME: <4 x i8> [[X:%.*]], <4 x i8> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i8> [[_MSPROP]], zeroinitializer
@@ -278,7 +278,7 @@ define <16 x i32> @ucmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind #0
; CHECK-LABEL: define <16 x i32> @ucmp_wide_vec_result(
; CHECK-SAME: <16 x i8> [[X:%.*]], <16 x i8> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i8> [[_MSPROP]], zeroinitializer
@@ -295,7 +295,7 @@ define <16 x i8> @ucmp_wide_vec_op(<16 x i32> %x, <16 x i32> %y) nounwind #0 {
; CHECK-LABEL: define <16 x i8> @ucmp_wide_vec_op(
; CHECK-SAME: <16 x i32> [[X:%.*]], <16 x i32> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
@@ -312,7 +312,7 @@ define <17 x i2> @ucmp_uncommon_vectors(<17 x i71> %x, <17 x i71> %y) nounwind #
; CHECK-LABEL: define <17 x i2> @ucmp_uncommon_vectors(
; CHECK-SAME: <17 x i71> [[X:%.*]], <17 x i71> [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <17 x i71>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <17 x i71>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <17 x i71>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <17 x i71> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <17 x i71> [[_MSPROP]], zeroinitializer
diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll b/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll
index 5da4c73..bfc47dc 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll
@@ -13,7 +13,7 @@ define float @test_v2f32(float %a0, <2 x float> %a1) #0 {
; CHECK-LABEL: define float @test_v2f32(
; CHECK-SAME: float [[A0:%.*]], <2 x float> [[A1:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]]
@@ -29,7 +29,7 @@ define float @test_v4f32(float %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: define float @test_v4f32(
; CHECK-SAME: float [[A0:%.*]], <4 x float> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]]
@@ -45,7 +45,7 @@ define float @test_v8f32(float %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: define float @test_v8f32(
; CHECK-SAME: float [[A0:%.*]], <8 x float> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]]
@@ -61,7 +61,7 @@ define float @test_v16f32(float %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: define float @test_v16f32(
; CHECK-SAME: float [[A0:%.*]], <16 x float> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]]
@@ -138,7 +138,7 @@ define double @test_v2f64(double %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: define double @test_v2f64(
; CHECK-SAME: double [[A0:%.*]], <2 x double> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]]
@@ -154,7 +154,7 @@ define double @test_v4f64(double %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: define double @test_v4f64(
; CHECK-SAME: double [[A0:%.*]], <4 x double> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]]
@@ -170,7 +170,7 @@ define double @test_v8f64(double %a0, <8 x double> %a1) #0 {
; CHECK-LABEL: define double @test_v8f64(
; CHECK-SAME: double [[A0:%.*]], <8 x double> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]]
@@ -186,7 +186,7 @@ define double @test_v16f64(double %a0, <16 x double> %a1) #0 {
; CHECK-LABEL: define double @test_v16f64(
; CHECK-SAME: double [[A0:%.*]], <16 x double> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll b/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll
index 0c1c4ed..db86d55 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll
@@ -13,7 +13,7 @@ define float @test_v2f32(float %a0, <2 x float> %a1) #0 {
; CHECK-LABEL: define float @test_v2f32(
; CHECK-SAME: float [[A0:%.*]], <2 x float> [[A1:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]]
@@ -29,7 +29,7 @@ define float @test_v4f32(float %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: define float @test_v4f32(
; CHECK-SAME: float [[A0:%.*]], <4 x float> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]]
@@ -45,7 +45,7 @@ define float @test_v8f32(float %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: define float @test_v8f32(
; CHECK-SAME: float [[A0:%.*]], <8 x float> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]]
@@ -61,7 +61,7 @@ define float @test_v16f32(float %a0, <16 x float> %a1) #0 {
; CHECK-LABEL: define float @test_v16f32(
; CHECK-SAME: float [[A0:%.*]], <16 x float> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]]
@@ -138,7 +138,7 @@ define double @test_v2f64(double %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: define double @test_v2f64(
; CHECK-SAME: double [[A0:%.*]], <2 x double> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]]
@@ -154,7 +154,7 @@ define double @test_v4f64(double %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: define double @test_v4f64(
; CHECK-SAME: double [[A0:%.*]], <4 x double> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]]
@@ -170,7 +170,7 @@ define double @test_v8f64(double %a0, <8 x double> %a1) #0 {
; CHECK-LABEL: define double @test_v8f64(
; CHECK-SAME: double [[A0:%.*]], <8 x double> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]]
@@ -186,7 +186,7 @@ define double @test_v16f64(double %a0, <16 x double> %a1) #0 {
; CHECK-LABEL: define double @test_v16f64(
; CHECK-SAME: double [[A0:%.*]], <16 x double> [[A1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll b/llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll
index d1060fb..1146131 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll
@@ -15,7 +15,7 @@ define <4 x i32> @Test_sse2_pmadd_wd(<8 x i16> %a, <8 x i16> %b) sanitize_memory
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[TMP0]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
@@ -46,7 +46,7 @@ define <1 x i64> @Test_ssse3_pmadd_ub_sw(<1 x i64> %a, <1 x i64> %b) sanitize_me
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -82,7 +82,7 @@ define <2 x i64> @Test_x86_sse2_psad_bw(<16 x i8> %a, <16 x i8> %b) sanitize_mem
; CHECK-LABEL: define <2 x i64> @Test_x86_sse2_psad_bw(
; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64>
@@ -104,7 +104,7 @@ define <1 x i64> @Test_x86_mmx_psad_bw(<1 x i64> %a, <1 x i64> %b) sanitize_memo
; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = or <1 x i64> [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP2]] to i64
diff --git a/llvm/test/Instrumentation/MemorySanitizer/vscale.ll b/llvm/test/Instrumentation/MemorySanitizer/vscale.ll
index 0c0b393..514abed 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/vscale.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/vscale.ll
@@ -435,7 +435,7 @@ define void @fn_param(<vscale x 2 x float> %a, ptr %b) sanitize_memory {
define void @test_param(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_param(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
@@ -455,8 +455,8 @@ define void @test_param(ptr %a, ptr %b) sanitize_memory {
;
; ORIGIN-LABEL: define void @test_param(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP3:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; ORIGIN-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s
index a313741..40fcd6f 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s
@@ -73,98 +73,98 @@ v_tanh_f32 v5, src_scc
v_tanh_f32 v255, 0xaf123456
// GFX1250: v_tanh_f32_e32 v255, 0xaf123456 ; encoding: [0xff,0x3c,0xfe,0x7f,0x56,0x34,0x12,0xaf]

-v_tanh_f16 v5, v1
-// GFX1250: v_tanh_f16_e32 v5, v1 ; encoding: [0x01,0x3f,0x0a,0x7e]
+v_tanh_f16 v5.l, v1.l
+// GFX1250: v_tanh_f16_e32 v5.l, v1.l ; encoding: [0x01,0x3f,0x0a,0x7e]

-v_tanh_f16 v5, v127
-// GFX1250: v_tanh_f16_e32 v5, v127 ; encoding: [0x7f,0x3f,0x0a,0x7e]
+v_tanh_f16 v5.l, v127.l
+// GFX1250: v_tanh_f16_e32 v5.l, v127.l ; encoding: [0x7f,0x3f,0x0a,0x7e]

-v_tanh_f16 v5, s1
-// GFX1250: v_tanh_f16_e32 v5, s1 ; encoding: [0x01,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, s1
+// GFX1250: v_tanh_f16_e32 v5.l, s1 ; encoding: [0x01,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, s105
-// GFX1250: v_tanh_f16_e32 v5, s105 ; encoding: [0x69,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, s105
+// GFX1250: v_tanh_f16_e32 v5.l, s105 ; encoding: [0x69,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, vcc_lo
-// GFX1250: v_tanh_f16_e32 v5, vcc_lo ; encoding: [0x6a,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, vcc_lo
+// GFX1250: v_tanh_f16_e32 v5.l, vcc_lo ; encoding: [0x6a,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, vcc_hi
-// GFX1250: v_tanh_f16_e32 v5, vcc_hi ; encoding: [0x6b,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, vcc_hi
+// GFX1250: v_tanh_f16_e32 v5.l, vcc_hi ; encoding: [0x6b,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, ttmp15
-// GFX1250: v_tanh_f16_e32 v5, ttmp15 ; encoding: [0x7b,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, ttmp15
+// GFX1250: v_tanh_f16_e32 v5.l, ttmp15 ; encoding: [0x7b,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, m0
-// GFX1250: v_tanh_f16_e32 v5, m0 ; encoding: [0x7d,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, m0
+// GFX1250: v_tanh_f16_e32 v5.l, m0 ; encoding: [0x7d,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, exec_lo
-// GFX1250: v_tanh_f16_e32 v5, exec_lo ; encoding: [0x7e,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, exec_lo
+// GFX1250: v_tanh_f16_e32 v5.l, exec_lo ; encoding: [0x7e,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, exec_hi
-// GFX1250: v_tanh_f16_e32 v5, exec_hi ; encoding: [0x7f,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, exec_hi
+// GFX1250: v_tanh_f16_e32 v5.l, exec_hi ; encoding: [0x7f,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, null
-// GFX1250: v_tanh_f16_e32 v5, null ; encoding: [0x7c,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, null
+// GFX1250: v_tanh_f16_e32 v5.l, null ; encoding: [0x7c,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, -1
-// GFX1250: v_tanh_f16_e32 v5, -1 ; encoding: [0xc1,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, -1
+// GFX1250: v_tanh_f16_e32 v5.l, -1 ; encoding: [0xc1,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, 0.5
-// GFX1250: v_tanh_f16_e32 v5, 0.5 ; encoding: [0xf0,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, 0.5
+// GFX1250: v_tanh_f16_e32 v5.l, 0.5 ; encoding: [0xf0,0x3e,0x0a,0x7e]

-v_tanh_f16 v5, src_scc
-// GFX1250: v_tanh_f16_e32 v5, src_scc ; encoding: [0xfd,0x3e,0x0a,0x7e]
+v_tanh_f16 v5.l, src_scc
+// GFX1250: v_tanh_f16_e32 v5.l, src_scc ; encoding: [0xfd,0x3e,0x0a,0x7e]

-v_tanh_f16 v127, 0x8000
-// GFX1250: v_tanh_f16_e32 v127, 0x8000 ; encoding: [0xff,0x3e,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_tanh_f16 v127.l, 0x8000
+// GFX1250: v_tanh_f16_e32 v127.l, 0x8000 ; encoding: [0xff,0x3e,0xfe,0x7e,0x00,0x80,0x00,0x00]

v_tanh_f16 v5.h, v1.h
// GFX1250: v_tanh_f16_e32 v5.h, v1.h ; encoding: [0x81,0x3f,0x0a,0x7f]

-v_tanh_bf16 v5, v1
-// GFX1250: v_tanh_bf16_e32 v5, v1 ; encoding: [0x01,0x95,0x0a,0x7e]
+v_tanh_bf16 v5.l, v1.l
+// GFX1250: v_tanh_bf16_e32 v5.l, v1.l ; encoding: [0x01,0x95,0x0a,0x7e]

-v_tanh_bf16 v5, v127
-// GFX1250: v_tanh_bf16_e32 v5, v127 ; encoding: [0x7f,0x95,0x0a,0x7e]
+v_tanh_bf16 v5.l, v127.l
+// GFX1250: v_tanh_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0x95,0x0a,0x7e]

-v_tanh_bf16 v5, s1
-// GFX1250: v_tanh_bf16_e32 v5, s1 ; encoding: [0x01,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, s1
+// GFX1250: v_tanh_bf16_e32 v5.l, s1 ; encoding: [0x01,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, s105
-// GFX1250: v_tanh_bf16_e32 v5, s105 ; encoding: [0x69,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, s105
+// GFX1250: v_tanh_bf16_e32 v5.l, s105 ; encoding: [0x69,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, vcc_lo
-// GFX1250: v_tanh_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, vcc_lo
+// GFX1250: v_tanh_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, vcc_hi
-// GFX1250: v_tanh_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, vcc_hi
+// GFX1250: v_tanh_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, ttmp15
-// GFX1250: v_tanh_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, ttmp15
+// GFX1250: v_tanh_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, m0
-// GFX1250: v_tanh_bf16_e32 v5, m0 ; encoding: [0x7d,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, m0
+// GFX1250: v_tanh_bf16_e32 v5.l, m0 ; encoding: [0x7d,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, exec_lo
-// GFX1250: v_tanh_bf16_e32 v5, exec_lo ; encoding: [0x7e,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, exec_lo
+// GFX1250: v_tanh_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, exec_hi
-// GFX1250: v_tanh_bf16_e32 v5, exec_hi ; encoding: [0x7f,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, exec_hi
+// GFX1250: v_tanh_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, null
-// GFX1250: v_tanh_bf16_e32 v5, null ; encoding: [0x7c,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, null
+// GFX1250: v_tanh_bf16_e32 v5.l, null ; encoding: [0x7c,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, -1
-// GFX1250: v_tanh_bf16_e32 v5, -1 ; encoding: [0xc1,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, -1
+// GFX1250: v_tanh_bf16_e32 v5.l, -1 ; encoding: [0xc1,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, 0.5
-// GFX1250: v_tanh_bf16_e32 v5, 0.5 ; encoding: [0xf0,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, 0.5
+// GFX1250: v_tanh_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0x94,0x0a,0x7e]

-v_tanh_bf16 v5, src_scc
-// GFX1250: v_tanh_bf16_e32 v5, src_scc ; encoding: [0xfd,0x94,0x0a,0x7e]
+v_tanh_bf16 v5.l, src_scc
+// GFX1250: v_tanh_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0x94,0x0a,0x7e]

-v_tanh_bf16 v127, 0x8000
-// GFX1250: v_tanh_bf16_e32 v127, 0x8000 ; encoding: [0xff,0x94,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_tanh_bf16 v127.l, 0x8000
+// GFX1250: v_tanh_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0x94,0xfe,0x7e,0x00,0x80,0x00,0x00]

v_tanh_bf16 v5.h, v1.h
// GFX1250: v_tanh_bf16_e32 v5.h, v1.h ; encoding: [0x81,0x95,0x0a,0x7f]
@@ -214,347 +214,347 @@ v_prng_b32 v5, src_scc
v_prng_b32 v255, 0xaf123456
// GFX1250: v_prng_b32_e32 v255, 0xaf123456 ; encoding: [0xff,0x96,0xfe,0x7f,0x56,0x34,0x12,0xaf]

-v_rcp_bf16 v5, v1
-// GFX1250: v_rcp_bf16_e32 v5, v1 ; encoding: [0x01,0xf3,0x0a,0x7e]
+v_rcp_bf16 v5.l, v1.l
+// GFX1250: v_rcp_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf3,0x0a,0x7e]

-v_rcp_bf16 v5, v127
-// GFX1250: v_rcp_bf16_e32 v5, v127 ; encoding: [0x7f,0xf3,0x0a,0x7e]
+v_rcp_bf16 v5.l, v127.l
+// GFX1250: v_rcp_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf3,0x0a,0x7e]

-v_rcp_bf16 v5, s1
-// GFX1250: v_rcp_bf16_e32 v5, s1 ; encoding: [0x01,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, s1
+// GFX1250: v_rcp_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf2,0x0a,0x7e]

-v_rcp_bf16 v5, s105
-// GFX1250: v_rcp_bf16_e32 v5, s105 ; encoding: [0x69,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, s105
+// GFX1250: v_rcp_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf2,0x0a,0x7e]

-v_rcp_bf16 v5, vcc_lo
-// GFX1250: v_rcp_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, vcc_lo
+// GFX1250: v_rcp_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf2,0x0a,0x7e]

-v_rcp_bf16 v5, vcc_hi
-// GFX1250: v_rcp_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, vcc_hi
+// GFX1250: v_rcp_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf2,0x0a,0x7e]
-v_rcp_bf16 v5, ttmp15
-// GFX1250: v_rcp_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, ttmp15
+// GFX1250: v_rcp_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf2,0x0a,0x7e]
-v_rcp_bf16 v5, m0
-// GFX1250: v_rcp_bf16_e32 v5, m0 ; encoding: [0x7d,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, m0
+// GFX1250: v_rcp_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf2,0x0a,0x7e]
-v_rcp_bf16 v5, exec_lo
-// GFX1250: v_rcp_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, exec_lo
+// GFX1250: v_rcp_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf2,0x0a,0x7e]
-v_rcp_bf16 v5, exec_hi
-// GFX1250: v_rcp_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, exec_hi
+// GFX1250: v_rcp_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf2,0x0a,0x7e]
-v_rcp_bf16 v5, null
-// GFX1250: v_rcp_bf16_e32 v5, null ; encoding: [0x7c,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, null
+// GFX1250: v_rcp_bf16_e32 v5.l, null ; encoding: [0x7c,0xf2,0x0a,0x7e]
-v_rcp_bf16 v5, -1
-// GFX1250: v_rcp_bf16_e32 v5, -1 ; encoding: [0xc1,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, -1
+// GFX1250: v_rcp_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf2,0x0a,0x7e]
-v_rcp_bf16 v5, 0.5
-// GFX1250: v_rcp_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, 0.5
+// GFX1250: v_rcp_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf2,0x0a,0x7e]
-v_rcp_bf16 v5, src_scc
-// GFX1250: v_rcp_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf2,0x0a,0x7e]
+v_rcp_bf16 v5.l, src_scc
+// GFX1250: v_rcp_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf2,0x0a,0x7e]
-v_rcp_bf16 v127, 0x8000
-// GFX1250: v_rcp_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf2,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_rcp_bf16 v127.l, 0x8000
+// GFX1250: v_rcp_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf2,0xfe,0x7e,0x00,0x80,0x00,0x00]
v_rcp_bf16 v5.h, v1.h
// GFX1250: v_rcp_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf3,0x0a,0x7f]
-v_sqrt_bf16 v5, v1
-// GFX1250: v_sqrt_bf16_e32 v5, v1 ; encoding: [0x01,0xf5,0x0a,0x7e]
+v_sqrt_bf16 v5.l, v1.l
+// GFX1250: v_sqrt_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf5,0x0a,0x7e]
-v_sqrt_bf16 v5, v127
-// GFX1250: v_sqrt_bf16_e32 v5, v127 ; encoding: [0x7f,0xf5,0x0a,0x7e]
+v_sqrt_bf16 v5.l, v127.l
+// GFX1250: v_sqrt_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf5,0x0a,0x7e]
-v_sqrt_bf16 v5, s1
-// GFX1250: v_sqrt_bf16_e32 v5, s1 ; encoding: [0x01,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, s1
+// GFX1250: v_sqrt_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, s105
-// GFX1250: v_sqrt_bf16_e32 v5, s105 ; encoding: [0x69,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, s105
+// GFX1250: v_sqrt_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, vcc_lo
-// GFX1250: v_sqrt_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, vcc_lo
+// GFX1250: v_sqrt_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, vcc_hi
-// GFX1250: v_sqrt_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, vcc_hi
+// GFX1250: v_sqrt_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, ttmp15
-// GFX1250: v_sqrt_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, ttmp15
+// GFX1250: v_sqrt_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, m0
-// GFX1250: v_sqrt_bf16_e32 v5, m0 ; encoding: [0x7d,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, m0
+// GFX1250: v_sqrt_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, exec_lo
-// GFX1250: v_sqrt_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, exec_lo
+// GFX1250: v_sqrt_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, exec_hi
-// GFX1250: v_sqrt_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, exec_hi
+// GFX1250: v_sqrt_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, null
-// GFX1250: v_sqrt_bf16_e32 v5, null ; encoding: [0x7c,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, null
+// GFX1250: v_sqrt_bf16_e32 v5.l, null ; encoding: [0x7c,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, -1
-// GFX1250: v_sqrt_bf16_e32 v5, -1 ; encoding: [0xc1,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, -1
+// GFX1250: v_sqrt_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, 0.5
-// GFX1250: v_sqrt_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, 0.5
+// GFX1250: v_sqrt_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v5, src_scc
-// GFX1250: v_sqrt_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf4,0x0a,0x7e]
+v_sqrt_bf16 v5.l, src_scc
+// GFX1250: v_sqrt_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf4,0x0a,0x7e]
-v_sqrt_bf16 v127, 0x8000
-// GFX1250: v_sqrt_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf4,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_sqrt_bf16 v127.l, 0x8000
+// GFX1250: v_sqrt_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf4,0xfe,0x7e,0x00,0x80,0x00,0x00]
v_sqrt_bf16 v5.h, v1.h
// GFX1250: v_sqrt_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf5,0x0a,0x7f]
-v_rsq_bf16 v5, v1
-// GFX1250: v_rsq_bf16_e32 v5, v1 ; encoding: [0x01,0xf7,0x0a,0x7e]
+v_rsq_bf16 v5.l, v1.l
+// GFX1250: v_rsq_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf7,0x0a,0x7e]
-v_rsq_bf16 v5, v127
-// GFX1250: v_rsq_bf16_e32 v5, v127 ; encoding: [0x7f,0xf7,0x0a,0x7e]
+v_rsq_bf16 v5.l, v127.l
+// GFX1250: v_rsq_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf7,0x0a,0x7e]
-v_rsq_bf16 v5, s1
-// GFX1250: v_rsq_bf16_e32 v5, s1 ; encoding: [0x01,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, s1
+// GFX1250: v_rsq_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, s105
-// GFX1250: v_rsq_bf16_e32 v5, s105 ; encoding: [0x69,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, s105
+// GFX1250: v_rsq_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, vcc_lo
-// GFX1250: v_rsq_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, vcc_lo
+// GFX1250: v_rsq_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, vcc_hi
-// GFX1250: v_rsq_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, vcc_hi
+// GFX1250: v_rsq_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, ttmp15
-// GFX1250: v_rsq_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, ttmp15
+// GFX1250: v_rsq_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, m0
-// GFX1250: v_rsq_bf16_e32 v5, m0 ; encoding: [0x7d,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, m0
+// GFX1250: v_rsq_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, exec_lo
-// GFX1250: v_rsq_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, exec_lo
+// GFX1250: v_rsq_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, exec_hi
-// GFX1250: v_rsq_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, exec_hi
+// GFX1250: v_rsq_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, null
-// GFX1250: v_rsq_bf16_e32 v5, null ; encoding: [0x7c,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, null
+// GFX1250: v_rsq_bf16_e32 v5.l, null ; encoding: [0x7c,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, -1
-// GFX1250: v_rsq_bf16_e32 v5, -1 ; encoding: [0xc1,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, -1
+// GFX1250: v_rsq_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, 0.5
-// GFX1250: v_rsq_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, 0.5
+// GFX1250: v_rsq_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf6,0x0a,0x7e]
-v_rsq_bf16 v5, src_scc
-// GFX1250: v_rsq_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf6,0x0a,0x7e]
+v_rsq_bf16 v5.l, src_scc
+// GFX1250: v_rsq_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf6,0x0a,0x7e]
-v_rsq_bf16 v127, 0x8000
-// GFX1250: v_rsq_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf6,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_rsq_bf16 v127.l, 0x8000
+// GFX1250: v_rsq_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf6,0xfe,0x7e,0x00,0x80,0x00,0x00]
v_rsq_bf16 v5.h, v1.h
// GFX1250: v_rsq_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf7,0x0a,0x7f]
-v_log_bf16 v5, v1
-// GFX1250: v_log_bf16_e32 v5, v1 ; encoding: [0x01,0xf9,0x0a,0x7e]
+v_log_bf16 v5.l, v1.l
+// GFX1250: v_log_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf9,0x0a,0x7e]
-v_log_bf16 v5, v127
-// GFX1250: v_log_bf16_e32 v5, v127 ; encoding: [0x7f,0xf9,0x0a,0x7e]
+v_log_bf16 v5.l, v127.l
+// GFX1250: v_log_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf9,0x0a,0x7e]
-v_log_bf16 v5, s1
-// GFX1250: v_log_bf16_e32 v5, s1 ; encoding: [0x01,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, s1
+// GFX1250: v_log_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf8,0x0a,0x7e]
-v_log_bf16 v5, s105
-// GFX1250: v_log_bf16_e32 v5, s105 ; encoding: [0x69,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, s105
+// GFX1250: v_log_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf8,0x0a,0x7e]
-v_log_bf16 v5, vcc_lo
-// GFX1250: v_log_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, vcc_lo
+// GFX1250: v_log_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf8,0x0a,0x7e]
-v_log_bf16 v5, vcc_hi
-// GFX1250: v_log_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, vcc_hi
+// GFX1250: v_log_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf8,0x0a,0x7e]
-v_log_bf16 v5, ttmp15
-// GFX1250: v_log_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, ttmp15
+// GFX1250: v_log_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf8,0x0a,0x7e]
-v_log_bf16 v5, m0
-// GFX1250: v_log_bf16_e32 v5, m0 ; encoding: [0x7d,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, m0
+// GFX1250: v_log_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf8,0x0a,0x7e]
-v_log_bf16 v5, exec_lo
-// GFX1250: v_log_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, exec_lo
+// GFX1250: v_log_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf8,0x0a,0x7e]
-v_log_bf16 v5, exec_hi
-// GFX1250: v_log_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, exec_hi
+// GFX1250: v_log_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf8,0x0a,0x7e]
-v_log_bf16 v5, null
-// GFX1250: v_log_bf16_e32 v5, null ; encoding: [0x7c,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, null
+// GFX1250: v_log_bf16_e32 v5.l, null ; encoding: [0x7c,0xf8,0x0a,0x7e]
-v_log_bf16 v5, -1
-// GFX1250: v_log_bf16_e32 v5, -1 ; encoding: [0xc1,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, -1
+// GFX1250: v_log_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf8,0x0a,0x7e]
-v_log_bf16 v5, 0.5
-// GFX1250: v_log_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, 0.5
+// GFX1250: v_log_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf8,0x0a,0x7e]
-v_log_bf16 v5, src_scc
-// GFX1250: v_log_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf8,0x0a,0x7e]
+v_log_bf16 v5.l, src_scc
+// GFX1250: v_log_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf8,0x0a,0x7e]
-v_log_bf16 v127, 0x8000
-// GFX1250: v_log_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf8,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_log_bf16 v127.l, 0x8000
+// GFX1250: v_log_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf8,0xfe,0x7e,0x00,0x80,0x00,0x00]
v_log_bf16 v5.h, v1.h
// GFX1250: v_log_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf9,0x0a,0x7f]
-v_exp_bf16 v5, v1
-// GFX1250: v_exp_bf16_e32 v5, v1 ; encoding: [0x01,0xfb,0x0a,0x7e]
+v_exp_bf16 v5.l, v1.l
+// GFX1250: v_exp_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xfb,0x0a,0x7e]
-v_exp_bf16 v5, v127
-// GFX1250: v_exp_bf16_e32 v5, v127 ; encoding: [0x7f,0xfb,0x0a,0x7e]
+v_exp_bf16 v5.l, v127.l
+// GFX1250: v_exp_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xfb,0x0a,0x7e]
-v_exp_bf16 v5, s1
-// GFX1250: v_exp_bf16_e32 v5, s1 ; encoding: [0x01,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, s1
+// GFX1250: v_exp_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, s105
-// GFX1250: v_exp_bf16_e32 v5, s105 ; encoding: [0x69,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, s105
+// GFX1250: v_exp_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, vcc_lo
-// GFX1250: v_exp_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, vcc_lo
+// GFX1250: v_exp_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, vcc_hi
-// GFX1250: v_exp_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, vcc_hi
+// GFX1250: v_exp_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, ttmp15
-// GFX1250: v_exp_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, ttmp15
+// GFX1250: v_exp_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, m0
-// GFX1250: v_exp_bf16_e32 v5, m0 ; encoding: [0x7d,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, m0
+// GFX1250: v_exp_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, exec_lo
-// GFX1250: v_exp_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, exec_lo
+// GFX1250: v_exp_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, exec_hi
-// GFX1250: v_exp_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, exec_hi
+// GFX1250: v_exp_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, null
-// GFX1250: v_exp_bf16_e32 v5, null ; encoding: [0x7c,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, null
+// GFX1250: v_exp_bf16_e32 v5.l, null ; encoding: [0x7c,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, -1
-// GFX1250: v_exp_bf16_e32 v5, -1 ; encoding: [0xc1,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, -1
+// GFX1250: v_exp_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, 0.5
-// GFX1250: v_exp_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, 0.5
+// GFX1250: v_exp_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfa,0x0a,0x7e]
-v_exp_bf16 v5, src_scc
-// GFX1250: v_exp_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfa,0x0a,0x7e]
+v_exp_bf16 v5.l, src_scc
+// GFX1250: v_exp_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfa,0x0a,0x7e]
-v_exp_bf16 v127, 0x8000
-// GFX1250: v_exp_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfa,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_exp_bf16 v127.l, 0x8000
+// GFX1250: v_exp_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfa,0xfe,0x7e,0x00,0x80,0x00,0x00]
v_exp_bf16 v5.h, v1.h
// GFX1250: v_exp_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xfb,0x0a,0x7f]
-v_sin_bf16 v5, v1
-// GFX1250: v_sin_bf16_e32 v5, v1 ; encoding: [0x01,0xfd,0x0a,0x7e]
+v_sin_bf16 v5.l, v1.l
+// GFX1250: v_sin_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xfd,0x0a,0x7e]
-v_sin_bf16 v5, v127
-// GFX1250: v_sin_bf16_e32 v5, v127 ; encoding: [0x7f,0xfd,0x0a,0x7e]
+v_sin_bf16 v5.l, v127.l
+// GFX1250: v_sin_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xfd,0x0a,0x7e]
-v_sin_bf16 v5, s1
-// GFX1250: v_sin_bf16_e32 v5, s1 ; encoding: [0x01,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, s1
+// GFX1250: v_sin_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, s105
-// GFX1250: v_sin_bf16_e32 v5, s105 ; encoding: [0x69,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, s105
+// GFX1250: v_sin_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, vcc_lo
-// GFX1250: v_sin_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, vcc_lo
+// GFX1250: v_sin_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, vcc_hi
-// GFX1250: v_sin_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, vcc_hi
+// GFX1250: v_sin_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, ttmp15
-// GFX1250: v_sin_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, ttmp15
+// GFX1250: v_sin_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, m0
-// GFX1250: v_sin_bf16_e32 v5, m0 ; encoding: [0x7d,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, m0
+// GFX1250: v_sin_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, exec_lo
-// GFX1250: v_sin_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, exec_lo
+// GFX1250: v_sin_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, exec_hi
-// GFX1250: v_sin_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, exec_hi
+// GFX1250: v_sin_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, null
-// GFX1250: v_sin_bf16_e32 v5, null ; encoding: [0x7c,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, null
+// GFX1250: v_sin_bf16_e32 v5.l, null ; encoding: [0x7c,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, -1
-// GFX1250: v_sin_bf16_e32 v5, -1 ; encoding: [0xc1,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, -1
+// GFX1250: v_sin_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, 0.5
-// GFX1250: v_sin_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, 0.5
+// GFX1250: v_sin_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfc,0x0a,0x7e]
-v_sin_bf16 v5, src_scc
-// GFX1250: v_sin_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfc,0x0a,0x7e]
+v_sin_bf16 v5.l, src_scc
+// GFX1250: v_sin_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfc,0x0a,0x7e]
-v_sin_bf16 v127, 0x8000
-// GFX1250: v_sin_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfc,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_sin_bf16 v127.l, 0x8000
+// GFX1250: v_sin_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfc,0xfe,0x7e,0x00,0x80,0x00,0x00]
v_sin_bf16 v5.h, v1.h
// GFX1250: v_sin_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xfd,0x0a,0x7f]
-v_cos_bf16 v5, v1
-// GFX1250: v_cos_bf16_e32 v5, v1 ; encoding: [0x01,0xff,0x0a,0x7e]
+v_cos_bf16 v5.l, v1.l
+// GFX1250: v_cos_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xff,0x0a,0x7e]
-v_cos_bf16 v5, v127
-// GFX1250: v_cos_bf16_e32 v5, v127 ; encoding: [0x7f,0xff,0x0a,0x7e]
+v_cos_bf16 v5.l, v127.l
+// GFX1250: v_cos_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xff,0x0a,0x7e]
-v_cos_bf16 v5, s1
-// GFX1250: v_cos_bf16_e32 v5, s1 ; encoding: [0x01,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, s1
+// GFX1250: v_cos_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, s105
-// GFX1250: v_cos_bf16_e32 v5, s105 ; encoding: [0x69,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, s105
+// GFX1250: v_cos_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, vcc_lo
-// GFX1250: v_cos_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, vcc_lo
+// GFX1250: v_cos_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, vcc_hi
-// GFX1250: v_cos_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, vcc_hi
+// GFX1250: v_cos_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, ttmp15
-// GFX1250: v_cos_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, ttmp15
+// GFX1250: v_cos_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, m0
-// GFX1250: v_cos_bf16_e32 v5, m0 ; encoding: [0x7d,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, m0
+// GFX1250: v_cos_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, exec_lo
-// GFX1250: v_cos_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, exec_lo
+// GFX1250: v_cos_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, exec_hi
-// GFX1250: v_cos_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, exec_hi
+// GFX1250: v_cos_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, null
-// GFX1250: v_cos_bf16_e32 v5, null ; encoding: [0x7c,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, null
+// GFX1250: v_cos_bf16_e32 v5.l, null ; encoding: [0x7c,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, -1
-// GFX1250: v_cos_bf16_e32 v5, -1 ; encoding: [0xc1,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, -1
+// GFX1250: v_cos_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, 0.5
-// GFX1250: v_cos_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, 0.5
+// GFX1250: v_cos_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfe,0x0a,0x7e]
-v_cos_bf16 v5, src_scc
-// GFX1250: v_cos_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfe,0x0a,0x7e]
+v_cos_bf16 v5.l, src_scc
+// GFX1250: v_cos_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfe,0x0a,0x7e]
-v_cos_bf16 v127, 0x8000
-// GFX1250: v_cos_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfe,0xfe,0x7e,0x00,0x80,0x00,0x00]
+v_cos_bf16 v127.l, 0x8000
+// GFX1250: v_cos_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfe,0xfe,0x7e,0x00,0x80,0x00,0x00]
v_cos_bf16 v5.h, v1.h
// GFX1250: v_cos_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xff,0x0a,0x7f]
-v_cvt_f32_bf16 v5, v1
-// GFX1250: v_cvt_f32_bf16_e32 v5, v1 ; encoding: [0x01,0xe5,0x0a,0x7e]
+v_cvt_f32_bf16 v5, v1.l
+// GFX1250: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e]
-v_cvt_f32_bf16 v5, v127
-// GFX1250: v_cvt_f32_bf16_e32 v5, v127 ; encoding: [0x7f,0xe5,0x0a,0x7e]
+v_cvt_f32_bf16 v5, v127.l
+// GFX1250: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e]
v_cvt_f32_bf16 v5, s1
// GFX1250: v_cvt_f32_bf16_e32 v5, s1 ; encoding: [0x01,0xe4,0x0a,0x7e]
@@ -676,11 +676,11 @@ v_cvt_pk_f32_bf8_e32 v[2:3], 3
v_cvt_pk_f32_bf8_e32 v[4:5], 3
// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], 3 ; encoding: [0x83,0xde,0x08,0x7e]
-v_cvt_pk_f32_bf8_e32 v[2:3], v3
-// GFX1250: v_cvt_pk_f32_bf8_e32 v[2:3], v3 ; encoding: [0x03,0xdf,0x04,0x7e]
+v_cvt_pk_f32_bf8_e32 v[2:3], v3.l
+// GFX1250: v_cvt_pk_f32_bf8_e32 v[2:3], v3.l ; encoding: [0x03,0xdf,0x04,0x7e]
-v_cvt_pk_f32_bf8_e32 v[4:5], v3
-// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v3 ; encoding: [0x03,0xdf,0x08,0x7e]
+v_cvt_pk_f32_bf8_e32 v[4:5], v3.l
+// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v3.l ; encoding: [0x03,0xdf,0x08,0x7e]
v_cvt_pk_f32_bf8_e32 v[4:5], v127.h
// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v127.h ; encoding: [0xff,0xdf,0x08,0x7e]
@@ -703,32 +703,32 @@ v_cvt_pk_f32_fp8_e32 v[4:5], v127.h
v_cvt_pk_f32_fp8_e32 v[4:5], v127.l
// GFX1250: v_cvt_pk_f32_fp8_e32 v[4:5], v127.l ; encoding: [0x7f,0xdd,0x08,0x7e]
-v_sat_pk4_i4_i8 v1, v2
-// GFX1250: v_sat_pk4_i4_i8_e32 v1, v2 ; encoding: [0x02,0xe7,0x02,0x7e]
+v_sat_pk4_i4_i8 v1.l, v2
+// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, v2 ; encoding: [0x02,0xe7,0x02,0x7e]
-v_sat_pk4_i4_i8 v1, s2
-// GFX1250: v_sat_pk4_i4_i8_e32 v1, s2 ; encoding: [0x02,0xe6,0x02,0x7e]
+v_sat_pk4_i4_i8 v1.l, s2
+// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, s2 ; encoding: [0x02,0xe6,0x02,0x7e]
-v_sat_pk4_i4_i8 v1, 2
-// GFX1250: v_sat_pk4_i4_i8_e32 v1, 2 ; encoding: [0x82,0xe6,0x02,0x7e]
+v_sat_pk4_i4_i8 v1.l, 2
+// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, 2 ; encoding: [0x82,0xe6,0x02,0x7e]
-v_sat_pk4_i4_i8 v1, 0x1234
-// GFX1250: v_sat_pk4_i4_i8_e32 v1, 0x1234 ; encoding: [0xff,0xe6,0x02,0x7e,0x34,0x12,0x00,0x00]
+v_sat_pk4_i4_i8 v1.l, 0x1234
+// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, 0x1234 ; encoding: [0xff,0xe6,0x02,0x7e,0x34,0x12,0x00,0x00]
v_sat_pk4_i4_i8 v1.h, v2
// GFX1250: v_sat_pk4_i4_i8_e32 v1.h, v2 ; encoding: [0x02,0xe7,0x02,0x7f]
-v_sat_pk4_u4_u8 v1, v2
-// GFX1250: v_sat_pk4_u4_u8_e32 v1, v2 ; encoding: [0x02,0xe9,0x02,0x7e]
+v_sat_pk4_u4_u8 v1.l, v2
+// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, v2 ; encoding: [0x02,0xe9,0x02,0x7e]
-v_sat_pk4_u4_u8 v1, s2
-// GFX1250: v_sat_pk4_u4_u8_e32 v1, s2 ; encoding: [0x02,0xe8,0x02,0x7e]
+v_sat_pk4_u4_u8 v1.l, s2
+// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, s2 ; encoding: [0x02,0xe8,0x02,0x7e]
-v_sat_pk4_u4_u8 v1, 2
-// GFX1250: v_sat_pk4_u4_u8_e32 v1, 2 ; encoding: [0x82,0xe8,0x02,0x7e]
+v_sat_pk4_u4_u8 v1.l, 2
+// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, 2 ; encoding: [0x82,0xe8,0x02,0x7e]
-v_sat_pk4_u4_u8 v1, 0x1234
-// GFX1250: v_sat_pk4_u4_u8_e32 v1, 0x1234 ; encoding: [0xff,0xe8,0x02,0x7e,0x34,0x12,0x00,0x00]
+v_sat_pk4_u4_u8 v1.l, 0x1234
+// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, 0x1234 ; encoding: [0xff,0xe8,0x02,0x7e,0x34,0x12,0x00,0x00]
v_sat_pk4_u4_u8 v1.h, v2
// GFX1250: v_sat_pk4_u4_u8_e32 v1.h, v2 ; encoding: [0x02,0xe9,0x02,0x7f]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s
index 0a46f2f..592619f 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s
@@ -58,120 +58,120 @@ v_tanh_f32 v255, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi
// GFX1250: v_tanh_f32_dpp v255, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3c,0xfe,0x7f,0xff,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_tanh_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_tanh_f16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_tanh_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_tanh_f16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_mirror
-// GFX1250: v_tanh_f16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_mirror
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_half_mirror
-// GFX1250: v_tanh_f16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_half_mirror
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_shl:1
-// GFX1250: v_tanh_f16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_shl:1
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_shl:15
-// GFX1250: v_tanh_f16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_shl:15
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_shr:1
-// GFX1250: v_tanh_f16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_shr:1
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_shr:15
-// GFX1250: v_tanh_f16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_shr:15
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_ror:1
-// GFX1250: v_tanh_f16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_ror:1
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_ror:15
-// GFX1250: v_tanh_f16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_ror:15
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_tanh_f16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_tanh_f16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_tanh_f16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_tanh_f16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_tanh_f16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_tanh_f16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_tanh_f16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3e,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_tanh_f16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_tanh_f16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3e,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_tanh_f16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_tanh_f16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_tanh_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_tanh_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_tanh_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_tanh_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_mirror
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_half_mirror
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_shl:1
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_shl:15
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_shr:1
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_shr:15
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_ror:1
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_ror:15
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_tanh_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_tanh_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_tanh_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_tanh_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_tanh_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x94,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_tanh_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_tanh_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x94,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_tanh_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
@@ -230,480 +230,480 @@ v_prng_b32 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
// GFX1250: v_prng_b32_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x96,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_rcp_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_rcp_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_rcp_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_rcp_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_mirror
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_half_mirror
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_shl:1
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_shl:15
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_shr:1
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_shr:15
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_ror:1
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_ror:15
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_rcp_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_rcp_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rcp_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_rcp_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rcp_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf2,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_rcp_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_rcp_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf2,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_rcp_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_rcp_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_sqrt_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_sqrt_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_sqrt_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_sqrt_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_mirror
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_half_mirror
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_shl:1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_shl:15
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_shr:1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_shr:15
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_ror:1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_ror:15
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_sqrt_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_sqrt_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sqrt_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_sqrt_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_sqrt_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sqrt_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_sqrt_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_rsq_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_rsq_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_rsq_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_rsq_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_mirror
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_half_mirror
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_shl:1
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_shl:15
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_shr:1
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_shr:15
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_ror:1
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_ror:15
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_rsq_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_rsq_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rsq_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf6,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_rsq_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_rsq_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf6,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_rsq_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_rsq_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_log_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_log_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_log_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_log_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_log_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_log_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_mirror
-// GFX1250: v_log_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_half_mirror
-// GFX1250: v_log_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_shl:1
-// GFX1250: v_log_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_shl:15
-// GFX1250: v_log_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_shr:1
-// GFX1250: v_log_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_shr:15
-// GFX1250: v_log_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_ror:1
-// GFX1250: v_log_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_ror:15
-// GFX1250: v_log_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_log_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_log_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_log_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_log_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_log_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_log_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_log_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_log_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf8,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_log_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_log_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf8,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_log_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_log_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_exp_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_exp_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_exp_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_exp_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_mirror
-// GFX1250: v_exp_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_half_mirror
-// GFX1250: v_exp_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_shl:1
-// GFX1250: v_exp_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_shl:15
-// GFX1250: v_exp_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_shr:1
-// GFX1250: v_exp_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_shr:15
-// GFX1250: v_exp_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_ror:1
-// GFX1250: v_exp_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_ror:15
-// GFX1250: v_exp_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_exp_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_exp_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_exp_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_exp_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_exp_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_exp_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_exp_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfa,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_exp_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_exp_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfa,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_exp_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_exp_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_sin_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_sin_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_sin_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_sin_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_mirror
-// GFX1250: v_sin_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_half_mirror
-// GFX1250: v_sin_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_shl:1
-// GFX1250: v_sin_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_shl:15
-// GFX1250: v_sin_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_shr:1
-// GFX1250: v_sin_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_shr:15
-// GFX1250: v_sin_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_ror:1
-// GFX1250: v_sin_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_ror:15
-// GFX1250: v_sin_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_sin_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_sin_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sin_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_sin_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sin_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_sin_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sin_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfc,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_sin_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_sin_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfc,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sin_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_sin_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_cos_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_cos_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_cos_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_cos_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_mirror
-// GFX1250: v_cos_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_half_mirror
-// GFX1250: v_cos_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_shl:1
-// GFX1250: v_cos_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_shl:15
-// GFX1250: v_cos_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_shr:1
-// GFX1250: v_cos_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_shr:15
-// GFX1250: v_cos_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_ror:1
-// GFX1250: v_cos_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_ror:15
-// GFX1250: v_cos_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_cos_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_cos_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_cos_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_cos_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_cos_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_cos_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_cos_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfe,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_cos_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cos_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfe,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cos_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_cos_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_cvt_f32_bf16 v5, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_cvt_f32_bf16 v5, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_mirror
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_mirror
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_half_mirror
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_half_mirror
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_shl:1
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_shl:1
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_shl:15
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_shl:15
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_shr:1
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_shr:1
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_shr:15
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_shr:15
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_ror:1
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_ror:1
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_ror:15
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_ror:15
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_cvt_f32_bf16 v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_cvt_f32_bf16 v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_cvt_f32_bf16 v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_cvt_f32_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_cvt_f32_bf16 v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cvt_f32_bf16 v5, v1.h quad_perm:[3,2,1,0]
@@ -750,24 +750,24 @@ v_cvt_pk_f16_fp8 v1, v2.h quad_perm:[0,1,2,3]
// GFX1250: v_cvt_pk_f16_fp8_dpp v1, v2.h quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xea,0x02,0x7e,0x82,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_i4_i8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
-// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x00,0xff]
+v_sat_pk4_i4_i8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
+// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_i4_i8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1
-// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x04,0xff]
+v_sat_pk4_i4_i8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1
+// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x04,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sat_pk4_i4_i8 v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
// GFX1250: v_sat_pk4_i4_i8_dpp v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7f,0x02,0x39,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_u4_u8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
-// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x00,0xff]
+v_sat_pk4_u4_u8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
+// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_u4_u8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1
-// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x04,0xff]
+v_sat_pk4_u4_u8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1
+// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x04,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sat_pk4_u4_u8 v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s
index 359aadc..2aabe39 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s
@@ -14,32 +14,32 @@ v_tanh_f32 v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_tanh_f32_dpp v255, v255 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x3c,0xfe,0x7f,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_f16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_tanh_f16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_tanh_f16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_tanh_f16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_tanh_f16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x3e,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_tanh_f16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_tanh_f16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x3e,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_tanh_f16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_tanh_f16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7f,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_tanh_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_tanh_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_tanh_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_tanh_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_tanh_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x94,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_tanh_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_tanh_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x94,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_tanh_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
@@ -58,152 +58,152 @@ v_prng_b32 v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_prng_b32_dpp v255, v255 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x96,0xfe,0x7f,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rcp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_rcp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rcp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_rcp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_rcp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rcp_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf2,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_rcp_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_rcp_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf2,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_rcp_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rcp_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7f,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sqrt_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_sqrt_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_sqrt_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sqrt_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf4,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_sqrt_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_sqrt_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf4,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sqrt_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sqrt_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7f,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rsq_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_rsq_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rsq_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_rsq_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rsq_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf6,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_rsq_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_rsq_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf6,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_rsq_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rsq_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7f,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_log_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_log_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_log_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_log_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_log_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_log_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_log_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf8,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_log_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_log_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf8,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_log_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_log_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7f,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_exp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_exp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_exp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_exp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_exp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_exp_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfa,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_exp_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_exp_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfa,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_exp_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_exp_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7f,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sin_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_sin_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sin_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_sin_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_sin_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sin_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfc,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_sin_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_sin_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfc,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sin_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sin_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfc,0x0a,0x7f,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cos_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_cos_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cos_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_cos_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cos_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_cos_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfe,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_cos_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cos_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfe,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cos_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cos_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7f,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_cvt_f32_bf16 v5, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
+v_cvt_f32_bf16 v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_cvt_f32_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+v_cvt_f32_bf16 v127, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cvt_f32_bf16 v5, v1.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f16_bf8 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cvt_f16_bf8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05]
+v_cvt_f16_bf8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f16_bf8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cvt_f16_bf8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05]
+v_cvt_f16_bf8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cvt_f16_bf8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_f16_bf8_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7f,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f16_fp8 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cvt_f16_fp8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xee,0x02,0x7e,0x02,0x77,0x39,0x05]
+v_cvt_f16_fp8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f16_fp8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xee,0x02,0x7e,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f16_fp8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cvt_f16_fp8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xee,0x02,0x7e,0x02,0x77,0x39,0x05]
+v_cvt_f16_fp8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_f16_fp8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xee,0x02,0x7e,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cvt_f16_fp8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0]
@@ -226,24 +226,24 @@ v_cvt_pk_f16_fp8 v1, v2.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_pk_f16_fp8_dpp v1, v2.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xea,0x02,0x7e,0x82,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_i4_i8 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05]
+v_sat_pk4_i4_i8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_i4_i8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05]
+v_sat_pk4_i4_i8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sat_pk4_i4_i8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sat_pk4_i4_i8_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7f,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_u4_u8 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05]
+v_sat_pk4_u4_u8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_u4_u8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05]
+v_sat_pk4_u4_u8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sat_pk4_u4_u8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
index b4d4e36..98d07ac 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
@@ -52,32 +52,32 @@ v_bitop3_b32 v255, 0xaf123456, vcc_hi, null bitop3:103
v_bitop3_b16 v5.l, v1.l, v2.l, s3
// GFX1250: v_bitop3_b16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x33,0xd6,0x01,0x05,0x0e,0x00]
-v_bitop3_b16 v5, v1, v2, s3 bitop3:161
-// GFX1250: v_bitop3_b16 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30]
+v_bitop3_b16 v5.l, v1.l, v2.l, s3 bitop3:161
+// GFX1250: v_bitop3_b16 v5.l, v1.l, v2.l, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30]
-v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27
-// GFX1250: v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1]
+v_bitop3_b16 v5.l, v255.l, s2, s105 bitop3:0x27
+// GFX1250: v_bitop3_b16 v5.l, v255.l, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1]
-v_bitop3_b16 v5, s1, v255, exec_hi bitop3:100
-// GFX1250: v_bitop3_b16 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89]
+v_bitop3_b16 v5.l, s1, v255.l, exec_hi bitop3:100
+// GFX1250: v_bitop3_b16 v5.l, s1, v255.l, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89]
-v_bitop3_b16 v5, s105, s105, exec_lo bitop3:0
-// GFX1250: v_bitop3_b16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01]
+v_bitop3_b16 v5.l, s105, s105, exec_lo bitop3:0
+// GFX1250: v_bitop3_b16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01]
-v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15
-// GFX1250: v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4]
+v_bitop3_b16 v5.l, vcc_lo, ttmp15, v3.l bitop3:0x15
+// GFX1250: v_bitop3_b16 v5.l, vcc_lo, ttmp15, v3.l bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4]
-v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:63
-// GFX1250: v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00]
+v_bitop3_b16 v5.l, vcc_hi, 0xfe0b, v255.l bitop3:63
+// GFX1250: v_bitop3_b16 v5.l, vcc_hi, 0xfe0b, v255.l bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00]
-v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24
-// GFX1250: v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81]
+v_bitop3_b16 v5.l, ttmp15, src_scc, ttmp15 bitop3:0x24
+// GFX1250: v_bitop3_b16 v5.l, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81]
-v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5
-// GFX1250: v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xe0,0xf5,0xa1]
+v_bitop3_b16 v5.l, m0, 0.5, m0 bitop3:5
+// GFX1250: v_bitop3_b16 v5.l, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xe0,0xf5,0xa1]
-v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6
-// GFX1250: v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1]
+v_bitop3_b16 v5.l, exec_lo, -1, vcc_hi bitop3:6
+// GFX1250: v_bitop3_b16 v5.l, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1]
v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
// GFX1250: v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1] ; encoding: [0x05,0x78,0x33,0xd6,0x7f,0xf8,0xa8,0x01]
@@ -563,17 +563,17 @@ v_cvt_sr_bf8_f16 v1, v2.l, v3
v_cvt_sr_bf8_f16 v1, v2.h, v3
// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
-v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0
-// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:0
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
-v_cvt_sr_bf8_f16 v1, v2, s3
-// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+v_cvt_sr_bf8_f16 v1, v2.l, s3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
-v_cvt_sr_bf8_f16 v1, v2, 0x1234
-// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+v_cvt_sr_bf8_f16 v1, v2.l, 0x1234
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
-v_cvt_sr_bf8_f16 v1, -v2, v3
-// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+v_cvt_sr_bf8_f16 v1, -v2.l, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
v_cvt_sr_bf8_f16 v1, |v2.l|, v3
// GFX1250: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
@@ -605,14 +605,14 @@ v_cvt_sr_fp8_f16 v1, v2.l, v3
v_cvt_sr_fp8_f16 v1, v2.h, v3
// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
-v_cvt_sr_fp8_f16 v1, v2, s3
-// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+v_cvt_sr_fp8_f16 v1, v2.l, s3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
-v_cvt_sr_fp8_f16 v1, v2, 0x1234
-// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+v_cvt_sr_fp8_f16 v1, v2.l, 0x1234
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
-v_cvt_sr_fp8_f16 v1, -v2, v3
-// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+v_cvt_sr_fp8_f16 v1, -v2.l, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
v_cvt_sr_fp8_f16 v1, |v2.l|, v3
// GFX1250: v_cvt_sr_fp8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
@@ -644,11 +644,11 @@ v_cvt_pk_fp8_f32 v1.l, v2, v3
v_cvt_pk_fp8_f32 v1.h, v2, v3
// GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
-v_cvt_pk_fp8_f32 v1, -v2, |v3|
-// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+v_cvt_pk_fp8_f32 v1.l, -v2, |v3|
+// GFX1250: v_cvt_pk_fp8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
-v_cvt_pk_fp8_f32 v1, s2, 3
-// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+v_cvt_pk_fp8_f32 v1.l, s2, 3
+// GFX1250: v_cvt_pk_fp8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp
// GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
@@ -656,14 +656,14 @@ v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp
v_cvt_pk_fp8_f32 v1.h, v2, v3 clamp
// GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
-v_cvt_pk_bf8_f32 v1, v2, v3
-// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+v_cvt_pk_bf8_f32 v1.l, v2, v3
+// GFX1250: v_cvt_pk_bf8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
-v_cvt_pk_bf8_f32 v1, -v2, |v3|
-// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+v_cvt_pk_bf8_f32 v1.l, -v2, |v3|
+// GFX1250: v_cvt_pk_bf8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
-v_cvt_pk_bf8_f32 v1, s2, 3
-// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+v_cvt_pk_bf8_f32 v1.l, s2, 3
+// GFX1250: v_cvt_pk_bf8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
v_cvt_sr_fp8_f32 v1, v2, v3
// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
index f766e52..fc0ea8b 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
@@ -62,60 +62,60 @@ v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0]
// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:161 quad_perm:[0,1,2,3]
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:161 quad_perm:[0,1,2,3]
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x27 row_mirror
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:100 row_half_mirror
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:100 row_half_mirror
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:0 row_shl:1
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v255.l bitop3:0 row_shl:1
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, s105 bitop3:0x16 row_shl:15
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:63 row_shr:1
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi bitop3:63 row_shr:1
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo bitop3:0x24 row_shr:15
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, ttmp15 bitop3:5 row_ror:1
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_hi bitop3:6 row_ror:15
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, null bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13]
+v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30]
+v_bitop3_b16_e64_dpp v255.l, v255.l, v255.l, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_bitop3_b16_e64_dpp v255.l, v255.l, v255.l, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
@@ -470,12 +470,12 @@ v_cvt_sr_bf8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1
// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
-// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
-// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3]
@@ -494,12 +494,12 @@ v_cvt_sr_fp8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1
// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
-// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
-// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
index 8e73ecb..5ac9eb4 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
@@ -46,50 +46,50 @@ v_bfrev_b32_e64 v5, src_scc
v_bfrev_b32_e64 v255, 0xaf123456
// GFX1250: v_bfrev_b32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xb8,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
-v_ceil_f16_e64 v5, v1
-// GFX1250: v_ceil_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x01,0x00,0x00]
+v_ceil_f16_e64 v5.l, v1.l
+// GFX1250: v_ceil_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x01,0x00,0x00]
-v_ceil_f16_e64 v5, v255
-// GFX1250: v_ceil_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdc,0xd5,0xff,0x01,0x00,0x00]
+v_ceil_f16_e64 v5.l, v255.l
+// GFX1250: v_ceil_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdc,0xd5,0xff,0x01,0x00,0x00]
-v_ceil_f16_e64 v5, s1
-// GFX1250: v_ceil_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, s1
+// GFX1250: v_ceil_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, s105
-// GFX1250: v_ceil_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdc,0xd5,0x69,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, s105
+// GFX1250: v_ceil_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdc,0xd5,0x69,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, vcc_lo
-// GFX1250: v_ceil_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x6a,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, vcc_lo
+// GFX1250: v_ceil_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x6a,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, vcc_hi
-// GFX1250: v_ceil_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x6b,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, vcc_hi
+// GFX1250: v_ceil_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x6b,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, ttmp15
-// GFX1250: v_ceil_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdc,0xd5,0x7b,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, ttmp15
+// GFX1250: v_ceil_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdc,0xd5,0x7b,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, m0
-// GFX1250: v_ceil_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdc,0xd5,0x7d,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, m0
+// GFX1250: v_ceil_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdc,0xd5,0x7d,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, exec_lo
-// GFX1250: v_ceil_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x7e,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, exec_lo
+// GFX1250: v_ceil_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x7e,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, exec_hi
-// GFX1250: v_ceil_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x7f,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, exec_hi
+// GFX1250: v_ceil_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x7f,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, null
-// GFX1250: v_ceil_f16_e64 v5, null ; encoding: [0x05,0x00,0xdc,0xd5,0x7c,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, null
+// GFX1250: v_ceil_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdc,0xd5,0x7c,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, -1
-// GFX1250: v_ceil_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdc,0xd5,0xc1,0x00,0x00,0x00]
+v_ceil_f16_e64 v5.l, -1
+// GFX1250: v_ceil_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdc,0xd5,0xc1,0x00,0x00,0x00]
-v_ceil_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_ceil_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdc,0xd5,0xf0,0x00,0x00,0x08]
+v_ceil_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_ceil_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdc,0xd5,0xf0,0x00,0x00,0x08]
-v_ceil_f16_e64 v5, src_scc mul:4
-// GFX1250: v_ceil_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdc,0xd5,0xfd,0x00,0x00,0x10]
+v_ceil_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_ceil_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdc,0xd5,0xfd,0x00,0x00,0x10]
-v_ceil_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_ceil_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdc,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_ceil_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_ceil_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdc,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_ceil_f16 v5.l, v128.l
// GFX1250: v_ceil_f16_e64 v5.l, v128.l ; encoding: [0x05,0x00,0xdc,0xd5,0x80,0x01,0x00,0x00]
@@ -268,50 +268,50 @@ v_clz_i32_u32_e64 v5, src_scc
v_clz_i32_u32_e64 v255, 0xaf123456
// GFX1250: v_clz_i32_u32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xb9,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
-v_cos_f16_e64 v5, v1
-// GFX1250: v_cos_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x01,0x00,0x00]
+v_cos_f16_e64 v5.l, v1.l
+// GFX1250: v_cos_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x01,0x00,0x00]
-v_cos_f16_e64 v5, v255
-// GFX1250: v_cos_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe1,0xd5,0xff,0x01,0x00,0x00]
+v_cos_f16_e64 v5.l, v255.l
+// GFX1250: v_cos_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe1,0xd5,0xff,0x01,0x00,0x00]
-v_cos_f16_e64 v5, s1
-// GFX1250: v_cos_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, s1
+// GFX1250: v_cos_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x00,0x00,0x00]
-v_cos_f16_e64 v5, s105
-// GFX1250: v_cos_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe1,0xd5,0x69,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, s105
+// GFX1250: v_cos_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe1,0xd5,0x69,0x00,0x00,0x00]
-v_cos_f16_e64 v5, vcc_lo
-// GFX1250: v_cos_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x6a,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, vcc_lo
+// GFX1250: v_cos_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x6a,0x00,0x00,0x00]
-v_cos_f16_e64 v5, vcc_hi
-// GFX1250: v_cos_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x6b,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, vcc_hi
+// GFX1250: v_cos_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x6b,0x00,0x00,0x00]
-v_cos_f16_e64 v5, ttmp15
-// GFX1250: v_cos_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe1,0xd5,0x7b,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, ttmp15
+// GFX1250: v_cos_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe1,0xd5,0x7b,0x00,0x00,0x00]
-v_cos_f16_e64 v5, m0
-// GFX1250: v_cos_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe1,0xd5,0x7d,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, m0
+// GFX1250: v_cos_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe1,0xd5,0x7d,0x00,0x00,0x00]
-v_cos_f16_e64 v5, exec_lo
-// GFX1250: v_cos_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x7e,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, exec_lo
+// GFX1250: v_cos_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x7e,0x00,0x00,0x00]
-v_cos_f16_e64 v5, exec_hi
-// GFX1250: v_cos_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x7f,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, exec_hi
+// GFX1250: v_cos_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x7f,0x00,0x00,0x00]
-v_cos_f16_e64 v5, null
-// GFX1250: v_cos_f16_e64 v5, null ; encoding: [0x05,0x00,0xe1,0xd5,0x7c,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, null
+// GFX1250: v_cos_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe1,0xd5,0x7c,0x00,0x00,0x00]
-v_cos_f16_e64 v5, -1
-// GFX1250: v_cos_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe1,0xd5,0xc1,0x00,0x00,0x00]
+v_cos_f16_e64 v5.l, -1
+// GFX1250: v_cos_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe1,0xd5,0xc1,0x00,0x00,0x00]
-v_cos_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_cos_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xe1,0xd5,0xf0,0x00,0x00,0x08]
+v_cos_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_cos_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xe1,0xd5,0xf0,0x00,0x00,0x08]
-v_cos_f16_e64 v5, src_scc mul:4
-// GFX1250: v_cos_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xe1,0xd5,0xfd,0x00,0x00,0x10]
+v_cos_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_cos_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xe1,0xd5,0xfd,0x00,0x00,0x10]
-v_cos_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_cos_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe1,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_cos_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_cos_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe1,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_cos_f16 v5.l, v128.l
// GFX1250: v_cos_f16_e64 v5.l, v128.l ; encoding: [0x05,0x00,0xe1,0xd5,0x80,0x01,0x00,0x00]
@@ -502,11 +502,11 @@ v_cvt_pk_f32_bf8_e64 v[2:3], 3
v_cvt_pk_f32_bf8_e64 v[2:3], 3 op_sel:[1,0]
// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], 3 op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x83,0x00,0x00,0x00]
-v_cvt_pk_f32_bf8_e64 v[2:3], v3
-// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3 ; encoding: [0x02,0x00,0xef,0xd5,0x03,0x01,0x00,0x00]
+v_cvt_pk_f32_bf8_e64 v[2:3], v3.l
+// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3.l ; encoding: [0x02,0x00,0xef,0xd5,0x03,0x01,0x00,0x00]
-v_cvt_pk_f32_bf8_e64 v[2:3], v3 op_sel:[1,0]
-// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3 op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x03,0x01,0x00,0x00]
+v_cvt_pk_f32_bf8_e64 v[2:3], v3.h op_sel:[1,0]
+// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3.h op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x03,0x01,0x00,0x00]
v_cvt_pk_f32_bf8 v[2:3], v128.h
// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v128.h op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x80,0x01,0x00,0x00]
@@ -526,11 +526,11 @@ v_cvt_pk_f32_fp8_e64 v[2:3], 3
v_cvt_pk_f32_fp8_e64 v[2:3], 3 op_sel:[1,0]
// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], 3 op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x83,0x00,0x00,0x00]
-v_cvt_pk_f32_fp8_e64 v[2:3], v3
-// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3 ; encoding: [0x02,0x00,0xee,0xd5,0x03,0x01,0x00,0x00]
+v_cvt_pk_f32_fp8_e64 v[2:3], v3.l
+// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3.l ; encoding: [0x02,0x00,0xee,0xd5,0x03,0x01,0x00,0x00]
-v_cvt_pk_f32_fp8_e64 v[2:3], v3 op_sel:[1,0]
-// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3 op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x03,0x01,0x00,0x00]
+v_cvt_pk_f32_fp8_e64 v[2:3], v3.h op_sel:[1,0]
+// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3.h op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x03,0x01,0x00,0x00]
v_cvt_pk_f32_fp8 v[2:3], v128.h
// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v128.h op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x80,0x01,0x00,0x00]
@@ -568,50 +568,50 @@ v_cvt_pk_f32_fp8_e64 v[4:5], v3
v_cvt_pk_f32_fp8_e64 v[4:5], v3 op_sel:[1,0]
// GFX1250: v_cvt_pk_f32_fp8_e64 v[4:5], v3 op_sel:[1,0] ; encoding: [0x04,0x08,0xee,0xd5,0x03,0x01,0x00,0x00]
-v_cvt_f16_f32_e64 v5, v1
-// GFX1250: v_cvt_f16_f32_e64 v5, v1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, v1
+// GFX1250: v_cvt_f16_f32_e64 v5.l, v1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_f16_f32_e64 v5, v255
-// GFX1250: v_cvt_f16_f32_e64 v5, v255 ; encoding: [0x05,0x00,0x8a,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, v255
+// GFX1250: v_cvt_f16_f32_e64 v5.l, v255 ; encoding: [0x05,0x00,0x8a,0xd5,0xff,0x01,0x00,0x00]
-v_cvt_f16_f32_e64 v5, s1
-// GFX1250: v_cvt_f16_f32_e64 v5, s1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, s1
+// GFX1250: v_cvt_f16_f32_e64 v5.l, s1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, s105
-// GFX1250: v_cvt_f16_f32_e64 v5, s105 ; encoding: [0x05,0x00,0x8a,0xd5,0x69,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, s105
+// GFX1250: v_cvt_f16_f32_e64 v5.l, s105 ; encoding: [0x05,0x00,0x8a,0xd5,0x69,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, vcc_lo
-// GFX1250: v_cvt_f16_f32_e64 v5, vcc_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x6a,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, vcc_lo
+// GFX1250: v_cvt_f16_f32_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x6a,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, vcc_hi
-// GFX1250: v_cvt_f16_f32_e64 v5, vcc_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x6b,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, vcc_hi
+// GFX1250: v_cvt_f16_f32_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x6b,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, ttmp15
-// GFX1250: v_cvt_f16_f32_e64 v5, ttmp15 ; encoding: [0x05,0x00,0x8a,0xd5,0x7b,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, ttmp15
+// GFX1250: v_cvt_f16_f32_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0x8a,0xd5,0x7b,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, m0
-// GFX1250: v_cvt_f16_f32_e64 v5, m0 ; encoding: [0x05,0x00,0x8a,0xd5,0x7d,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, m0
+// GFX1250: v_cvt_f16_f32_e64 v5.l, m0 ; encoding: [0x05,0x00,0x8a,0xd5,0x7d,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, exec_lo
-// GFX1250: v_cvt_f16_f32_e64 v5, exec_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x7e,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, exec_lo
+// GFX1250: v_cvt_f16_f32_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x7e,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, exec_hi
-// GFX1250: v_cvt_f16_f32_e64 v5, exec_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x7f,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, exec_hi
+// GFX1250: v_cvt_f16_f32_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x7f,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, null
-// GFX1250: v_cvt_f16_f32_e64 v5, null ; encoding: [0x05,0x00,0x8a,0xd5,0x7c,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, null
+// GFX1250: v_cvt_f16_f32_e64 v5.l, null ; encoding: [0x05,0x00,0x8a,0xd5,0x7c,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, -1
-// GFX1250: v_cvt_f16_f32_e64 v5, -1 ; encoding: [0x05,0x00,0x8a,0xd5,0xc1,0x00,0x00,0x00]
+v_cvt_f16_f32_e64 v5.l, -1
+// GFX1250: v_cvt_f16_f32_e64 v5.l, -1 ; encoding: [0x05,0x00,0x8a,0xd5,0xc1,0x00,0x00,0x00]
-v_cvt_f16_f32_e64 v5, 0.5 mul:2
-// GFX1250: v_cvt_f16_f32_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0x8a,0xd5,0xf0,0x00,0x00,0x08]
+v_cvt_f16_f32_e64 v5.l, 0.5 mul:2
+// GFX1250: v_cvt_f16_f32_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0x8a,0xd5,0xf0,0x00,0x00,0x08]
-v_cvt_f16_f32_e64 v5, src_scc mul:4
-// GFX1250: v_cvt_f16_f32_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0x8a,0xd5,0xfd,0x00,0x00,0x10]
+v_cvt_f16_f32_e64 v5.l, src_scc mul:4
+// GFX1250: v_cvt_f16_f32_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0x8a,0xd5,0xfd,0x00,0x00,0x10]
-v_cvt_f16_f32_e64 v255, -|0xaf123456| clamp div:2
-// GFX1250: v_cvt_f16_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x8a,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf]
+v_cvt_f16_f32_e64 v255.l, -|0xaf123456| clamp div:2
+// GFX1250: v_cvt_f16_f32_e64 v255.l, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x8a,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf]
v_cvt_f16_f32 v128.l, v15
// GFX1250: v_cvt_f16_f32_e64 v128.l, v15 ; encoding: [0x80,0x00,0x8a,0xd5,0x0f,0x01,0x00,0x00]
@@ -619,50 +619,50 @@ v_cvt_f16_f32 v128.l, v15
v_cvt_f16_f32 v128.h, v15
// GFX1250: v_cvt_f16_f32_e64 v128.h, v15 op_sel:[0,1] ; encoding: [0x80,0x40,0x8a,0xd5,0x0f,0x01,0x00,0x00]
-v_cvt_f16_i16_e64 v5, v1
-// GFX1250: v_cvt_f16_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, v1.l
+// GFX1250: v_cvt_f16_i16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_f16_i16_e64 v5, v255
-// GFX1250: v_cvt_f16_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xd1,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, v255.l
+// GFX1250: v_cvt_f16_i16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd1,0xd5,0xff,0x01,0x00,0x00]
-v_cvt_f16_i16_e64 v5, s1
-// GFX1250: v_cvt_f16_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, s1
+// GFX1250: v_cvt_f16_i16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, s105
-// GFX1250: v_cvt_f16_i16_e64 v5, s105 ; encoding: [0x05,0x00,0xd1,0xd5,0x69,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, s105
+// GFX1250: v_cvt_f16_i16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd1,0xd5,0x69,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, vcc_lo
-// GFX1250: v_cvt_f16_i16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x6a,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, vcc_lo
+// GFX1250: v_cvt_f16_i16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x6a,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, vcc_hi
-// GFX1250: v_cvt_f16_i16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x6b,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, vcc_hi
+// GFX1250: v_cvt_f16_i16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x6b,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, ttmp15
-// GFX1250: v_cvt_f16_i16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd1,0xd5,0x7b,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, ttmp15
+// GFX1250: v_cvt_f16_i16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd1,0xd5,0x7b,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, m0
-// GFX1250: v_cvt_f16_i16_e64 v5, m0 ; encoding: [0x05,0x00,0xd1,0xd5,0x7d,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, m0
+// GFX1250: v_cvt_f16_i16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd1,0xd5,0x7d,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, exec_lo
-// GFX1250: v_cvt_f16_i16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x7e,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, exec_lo
+// GFX1250: v_cvt_f16_i16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x7e,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, exec_hi
-// GFX1250: v_cvt_f16_i16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x7f,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, exec_hi
+// GFX1250: v_cvt_f16_i16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x7f,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, null
-// GFX1250: v_cvt_f16_i16_e64 v5, null ; encoding: [0x05,0x00,0xd1,0xd5,0x7c,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, null
+// GFX1250: v_cvt_f16_i16_e64 v5.l, null ; encoding: [0x05,0x00,0xd1,0xd5,0x7c,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, -1
-// GFX1250: v_cvt_f16_i16_e64 v5, -1 ; encoding: [0x05,0x00,0xd1,0xd5,0xc1,0x00,0x00,0x00]
+v_cvt_f16_i16_e64 v5.l, -1
+// GFX1250: v_cvt_f16_i16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd1,0xd5,0xc1,0x00,0x00,0x00]
-v_cvt_f16_i16_e64 v5, 0.5 mul:2
-// GFX1250: v_cvt_f16_i16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd1,0xd5,0xf0,0x00,0x00,0x08]
+v_cvt_f16_i16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_cvt_f16_i16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd1,0xd5,0xf0,0x00,0x00,0x08]
-v_cvt_f16_i16_e64 v5, src_scc mul:4
-// GFX1250: v_cvt_f16_i16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd1,0xd5,0xfd,0x00,0x00,0x10]
+v_cvt_f16_i16_e64 v5.l, src_scc mul:4
+// GFX1250: v_cvt_f16_i16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd1,0xd5,0xfd,0x00,0x00,0x10]
-v_cvt_f16_i16_e64 v255, 0xfe0b clamp div:2
-// GFX1250: v_cvt_f16_i16_e64 v255, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd1,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00]
+v_cvt_f16_i16_e64 v255.l, 0xfe0b clamp div:2
+// GFX1250: v_cvt_f16_i16_e64 v255.l, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd1,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00]
v_cvt_f16_i16 v128.l, v15.l
// GFX1250: v_cvt_f16_i16_e64 v128.l, v15.l ; encoding: [0x80,0x00,0xd1,0xd5,0x0f,0x01,0x00,0x00]
@@ -670,50 +670,50 @@ v_cvt_f16_i16 v128.l, v15.l
v_cvt_f16_i16 v128.h, v15.h
// GFX1250: v_cvt_f16_i16_e64 v128.h, v15.h op_sel:[1,1] ; encoding: [0x80,0x48,0xd1,0xd5,0x0f,0x01,0x00,0x00]
-v_cvt_f16_u16_e64 v5, v1
-// GFX1250: v_cvt_f16_u16_e64 v5, v1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, v1.l
+// GFX1250: v_cvt_f16_u16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_f16_u16_e64 v5, v255
-// GFX1250: v_cvt_f16_u16_e64 v5, v255 ; encoding: [0x05,0x00,0xd0,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, v255.l
+// GFX1250: v_cvt_f16_u16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd0,0xd5,0xff,0x01,0x00,0x00]
-v_cvt_f16_u16_e64 v5, s1
-// GFX1250: v_cvt_f16_u16_e64 v5, s1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, s1
+// GFX1250: v_cvt_f16_u16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, s105
-// GFX1250: v_cvt_f16_u16_e64 v5, s105 ; encoding: [0x05,0x00,0xd0,0xd5,0x69,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, s105
+// GFX1250: v_cvt_f16_u16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd0,0xd5,0x69,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, vcc_lo
-// GFX1250: v_cvt_f16_u16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x6a,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, vcc_lo
+// GFX1250: v_cvt_f16_u16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x6a,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, vcc_hi
-// GFX1250: v_cvt_f16_u16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x6b,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, vcc_hi
+// GFX1250: v_cvt_f16_u16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x6b,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, ttmp15
-// GFX1250: v_cvt_f16_u16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd0,0xd5,0x7b,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, ttmp15
+// GFX1250: v_cvt_f16_u16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd0,0xd5,0x7b,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, m0
-// GFX1250: v_cvt_f16_u16_e64 v5, m0 ; encoding: [0x05,0x00,0xd0,0xd5,0x7d,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, m0
+// GFX1250: v_cvt_f16_u16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd0,0xd5,0x7d,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, exec_lo
-// GFX1250: v_cvt_f16_u16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x7e,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, exec_lo
+// GFX1250: v_cvt_f16_u16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x7e,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, exec_hi
-// GFX1250: v_cvt_f16_u16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x7f,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, exec_hi
+// GFX1250: v_cvt_f16_u16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x7f,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, null
-// GFX1250: v_cvt_f16_u16_e64 v5, null ; encoding: [0x05,0x00,0xd0,0xd5,0x7c,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, null
+// GFX1250: v_cvt_f16_u16_e64 v5.l, null ; encoding: [0x05,0x00,0xd0,0xd5,0x7c,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, -1
-// GFX1250: v_cvt_f16_u16_e64 v5, -1 ; encoding: [0x05,0x00,0xd0,0xd5,0xc1,0x00,0x00,0x00]
+v_cvt_f16_u16_e64 v5.l, -1
+// GFX1250: v_cvt_f16_u16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd0,0xd5,0xc1,0x00,0x00,0x00]
-v_cvt_f16_u16_e64 v5, 0.5 mul:2
-// GFX1250: v_cvt_f16_u16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd0,0xd5,0xf0,0x00,0x00,0x08]
+v_cvt_f16_u16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_cvt_f16_u16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd0,0xd5,0xf0,0x00,0x00,0x08]
-v_cvt_f16_u16_e64 v5, src_scc mul:4
-// GFX1250: v_cvt_f16_u16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd0,0xd5,0xfd,0x00,0x00,0x10]
+v_cvt_f16_u16_e64 v5.l, src_scc mul:4
+// GFX1250: v_cvt_f16_u16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd0,0xd5,0xfd,0x00,0x00,0x10]
-v_cvt_f16_u16_e64 v255, 0xfe0b clamp div:2
-// GFX1250: v_cvt_f16_u16_e64 v255, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd0,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00]
+v_cvt_f16_u16_e64 v255.l, 0xfe0b clamp div:2
+// GFX1250: v_cvt_f16_u16_e64 v255.l, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd0,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00]
v_cvt_f16_u16 v128.l, v15.l
// GFX1250: v_cvt_f16_u16_e64 v128.l, v15.l ; encoding: [0x80,0x00,0xd0,0xd5,0x0f,0x01,0x00,0x00]
@@ -721,11 +721,11 @@ v_cvt_f16_u16 v128.l, v15.l
v_cvt_f16_u16 v128.h, v15.h
// GFX1250: v_cvt_f16_u16_e64 v128.h, v15.h op_sel:[1,1] ; encoding: [0x80,0x48,0xd0,0xd5,0x0f,0x01,0x00,0x00]
-v_cvt_f32_f16_e64 v5, v1
-// GFX1250: v_cvt_f32_f16_e64 v5, v1 ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_f32_f16_e64 v5, v1.l
+// GFX1250: v_cvt_f32_f16_e64 v5, v1.l ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_f32_f16_e64 v5, v255
-// GFX1250: v_cvt_f32_f16_e64 v5, v255 ; encoding: [0x05,0x00,0x8b,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_f32_f16_e64 v5, v255.l
+// GFX1250: v_cvt_f32_f16_e64 v5, v255.l ; encoding: [0x05,0x00,0x8b,0xd5,0xff,0x01,0x00,0x00]
v_cvt_f32_f16_e64 v5, s1
// GFX1250: v_cvt_f32_f16_e64 v5, s1 ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x00,0x00,0x00]
@@ -1303,50 +1303,50 @@ v_cvt_flr_i32_f32_e64 v5, src_scc
v_cvt_flr_i32_f32_e64 v255, -|0xaf123456|
// GFX1250: v_cvt_floor_i32_f32_e64 v255, -|0xaf123456| ; encoding: [0xff,0x01,0x8d,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf]
-v_cvt_i16_f16_e64 v5, v1
-// GFX1250: v_cvt_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, v1.l
+// GFX1250: v_cvt_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_i16_f16_e64 v5, v255
-// GFX1250: v_cvt_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd3,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, v255.l
+// GFX1250: v_cvt_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd3,0xd5,0xff,0x01,0x00,0x00]
-v_cvt_i16_f16_e64 v5, s1
-// GFX1250: v_cvt_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, s1
+// GFX1250: v_cvt_i16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, s105
-// GFX1250: v_cvt_i16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd3,0xd5,0x69,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, s105
+// GFX1250: v_cvt_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd3,0xd5,0x69,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, vcc_lo
-// GFX1250: v_cvt_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x6a,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, vcc_lo
+// GFX1250: v_cvt_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x6a,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, vcc_hi
-// GFX1250: v_cvt_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x6b,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, vcc_hi
+// GFX1250: v_cvt_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x6b,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, ttmp15
-// GFX1250: v_cvt_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd3,0xd5,0x7b,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, ttmp15
+// GFX1250: v_cvt_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd3,0xd5,0x7b,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, m0
-// GFX1250: v_cvt_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd3,0xd5,0x7d,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, m0
+// GFX1250: v_cvt_i16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd3,0xd5,0x7d,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, exec_lo
-// GFX1250: v_cvt_i16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x7e,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, exec_lo
+// GFX1250: v_cvt_i16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x7e,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, exec_hi
-// GFX1250: v_cvt_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x7f,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, exec_hi
+// GFX1250: v_cvt_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x7f,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, null
-// GFX1250: v_cvt_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xd3,0xd5,0x7c,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, null
+// GFX1250: v_cvt_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd3,0xd5,0x7c,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, -1
-// GFX1250: v_cvt_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd3,0xd5,0xc1,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, -1
+// GFX1250: v_cvt_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd3,0xd5,0xc1,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, 0.5
-// GFX1250: v_cvt_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xd3,0xd5,0xf0,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, 0.5
+// GFX1250: v_cvt_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xd3,0xd5,0xf0,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v5, src_scc
-// GFX1250: v_cvt_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xd3,0xd5,0xfd,0x00,0x00,0x00]
+v_cvt_i16_f16_e64 v5.l, src_scc
+// GFX1250: v_cvt_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xd3,0xd5,0xfd,0x00,0x00,0x00]
-v_cvt_i16_f16_e64 v255, -|0xfe0b| clamp
-// GFX1250: v_cvt_i16_f16_e64 v255, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
+v_cvt_i16_f16_e64 v255.l, -|0xfe0b| clamp
+// GFX1250: v_cvt_i16_f16_e64 v255.l, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
v_cvt_i16_f16 v1.l, v128.l
// GFX1250: v_cvt_i16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xd3,0xd5,0x80,0x01,0x00,0x00]
@@ -1435,11 +1435,11 @@ v_cvt_i32_f64_e64 v5, -|src_scc|
v_cvt_i32_f64_e64 v255, 0xaf123456 clamp
// GFX1250: v_cvt_i32_f64_e64 v255, 0xaf123456 clamp ; encoding: [0xff,0x80,0x83,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
-v_cvt_i32_i16_e64 v5, v1
-// GFX1250: v_cvt_i32_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_i32_i16_e64 v5, v1.l
+// GFX1250: v_cvt_i32_i16_e64 v5, v1.l ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_i32_i16_e64 v5, v255
-// GFX1250: v_cvt_i32_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xea,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_i32_i16_e64 v5, v255.l
+// GFX1250: v_cvt_i32_i16_e64 v5, v255.l ; encoding: [0x05,0x00,0xea,0xd5,0xff,0x01,0x00,0x00]
v_cvt_i32_i16_e64 v5, s1
// GFX1250: v_cvt_i32_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x00,0x00,0x00]
@@ -1531,50 +1531,50 @@ v_cvt_nearest_i32_f32_e64 v5, src_scc
v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456|
// GFX1250: v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456| ; encoding: [0xff,0x01,0x8c,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf]
-v_cvt_norm_i16_f16_e64 v5, v1
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, v1.l
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, v255
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe3,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, v255.l
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe3,0xd5,0xff,0x01,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, s1
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, s1
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, s105
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe3,0xd5,0x69,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, s105
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe3,0xd5,0x69,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, vcc_lo
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x6a,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, vcc_lo
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x6a,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, vcc_hi
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x6b,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, vcc_hi
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x6b,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, ttmp15
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe3,0xd5,0x7b,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, ttmp15
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe3,0xd5,0x7b,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, m0
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe3,0xd5,0x7d,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, m0
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe3,0xd5,0x7d,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, exec_lo
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x7e,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, exec_lo
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x7e,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, exec_hi
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x7f,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, exec_hi
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x7f,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, null
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xe3,0xd5,0x7c,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, null
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe3,0xd5,0x7c,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, -1
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe3,0xd5,0xc1,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, -1
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe3,0xd5,0xc1,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, 0.5
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe3,0xd5,0xf0,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, 0.5
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe3,0xd5,0xf0,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v5, src_scc
-// GFX1250: v_cvt_norm_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe3,0xd5,0xfd,0x00,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v5.l, src_scc
+// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe3,0xd5,0xfd,0x00,0x00,0x00]
-v_cvt_norm_i16_f16_e64 v255, -|0xfe0b|
-// GFX1250: v_cvt_norm_i16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xe3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
+v_cvt_norm_i16_f16_e64 v255.l, -|0xfe0b|
+// GFX1250: v_cvt_norm_i16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xe3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
v_cvt_norm_i16_f16 v1.l, v128.l
// GFX1250: v_cvt_norm_i16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xe3,0xd5,0x80,0x01,0x00,0x00]
@@ -1582,50 +1582,50 @@ v_cvt_norm_i16_f16 v1.l, v128.l
v_cvt_norm_i16_f16 v1.l, v128.h
// GFX1250: v_cvt_norm_i16_f16_e64 v1.l, v128.h op_sel:[1,0] ; encoding: [0x01,0x08,0xe3,0xd5,0x80,0x01,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, v1
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, v1.l
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, v255
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe4,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, v255.l
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe4,0xd5,0xff,0x01,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, s1
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, s1
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, s105
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe4,0xd5,0x69,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, s105
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe4,0xd5,0x69,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, vcc_lo
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x6a,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, vcc_lo
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x6a,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, vcc_hi
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x6b,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, vcc_hi
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x6b,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, ttmp15
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe4,0xd5,0x7b,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, ttmp15
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe4,0xd5,0x7b,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, m0
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe4,0xd5,0x7d,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, m0
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe4,0xd5,0x7d,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, exec_lo
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x7e,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, exec_lo
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x7e,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, exec_hi
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x7f,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, exec_hi
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x7f,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, null
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, null ; encoding: [0x05,0x00,0xe4,0xd5,0x7c,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, null
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe4,0xd5,0x7c,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, -1
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe4,0xd5,0xc1,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, -1
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe4,0xd5,0xc1,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, 0.5
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe4,0xd5,0xf0,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, 0.5
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe4,0xd5,0xf0,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v5, src_scc
-// GFX1250: v_cvt_norm_u16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe4,0xd5,0xfd,0x00,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v5.l, src_scc
+// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe4,0xd5,0xfd,0x00,0x00,0x00]
-v_cvt_norm_u16_f16_e64 v255, -|0xfe0b|
-// GFX1250: v_cvt_norm_u16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xe4,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
+v_cvt_norm_u16_f16_e64 v255.l, -|0xfe0b|
+// GFX1250: v_cvt_norm_u16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xe4,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
v_cvt_norm_u16_f16 v1.l, v128.l
// GFX1250: v_cvt_norm_u16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xe4,0xd5,0x80,0x01,0x00,0x00]
@@ -1723,50 +1723,50 @@ v_cvt_rpi_i32_f32_e64 v5, src_scc
v_cvt_rpi_i32_f32_e64 v255, -|0xaf123456|
// GFX1250: v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456| ; encoding: [0xff,0x01,0x8c,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf]
-v_cvt_u16_f16_e64 v5, v1
-// GFX1250: v_cvt_u16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, v1.l
+// GFX1250: v_cvt_u16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_u16_f16_e64 v5, v255
-// GFX1250: v_cvt_u16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd2,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, v255.l
+// GFX1250: v_cvt_u16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd2,0xd5,0xff,0x01,0x00,0x00]
-v_cvt_u16_f16_e64 v5, s1
-// GFX1250: v_cvt_u16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, s1
+// GFX1250: v_cvt_u16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, s105
-// GFX1250: v_cvt_u16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd2,0xd5,0x69,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, s105
+// GFX1250: v_cvt_u16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd2,0xd5,0x69,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, vcc_lo
-// GFX1250: v_cvt_u16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x6a,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, vcc_lo
+// GFX1250: v_cvt_u16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x6a,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, vcc_hi
-// GFX1250: v_cvt_u16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x6b,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, vcc_hi
+// GFX1250: v_cvt_u16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x6b,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, ttmp15
-// GFX1250: v_cvt_u16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd2,0xd5,0x7b,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, ttmp15
+// GFX1250: v_cvt_u16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd2,0xd5,0x7b,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, m0
-// GFX1250: v_cvt_u16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd2,0xd5,0x7d,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, m0
+// GFX1250: v_cvt_u16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd2,0xd5,0x7d,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, exec_lo
-// GFX1250: v_cvt_u16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x7e,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, exec_lo
+// GFX1250: v_cvt_u16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x7e,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, exec_hi
-// GFX1250: v_cvt_u16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x7f,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, exec_hi
+// GFX1250: v_cvt_u16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x7f,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, null
-// GFX1250: v_cvt_u16_f16_e64 v5, null ; encoding: [0x05,0x00,0xd2,0xd5,0x7c,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, null
+// GFX1250: v_cvt_u16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd2,0xd5,0x7c,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, -1
-// GFX1250: v_cvt_u16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd2,0xd5,0xc1,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, -1
+// GFX1250: v_cvt_u16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd2,0xd5,0xc1,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, 0.5
-// GFX1250: v_cvt_u16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xd2,0xd5,0xf0,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, 0.5
+// GFX1250: v_cvt_u16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xd2,0xd5,0xf0,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v5, src_scc
-// GFX1250: v_cvt_u16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xd2,0xd5,0xfd,0x00,0x00,0x00]
+v_cvt_u16_f16_e64 v5.l, src_scc
+// GFX1250: v_cvt_u16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xd2,0xd5,0xfd,0x00,0x00,0x00]
-v_cvt_u16_f16_e64 v255, -|0xfe0b| clamp
-// GFX1250: v_cvt_u16_f16_e64 v255, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd2,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
+v_cvt_u16_f16_e64 v255.l, -|0xfe0b| clamp
+// GFX1250: v_cvt_u16_f16_e64 v255.l, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd2,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
v_cvt_u16_f16 v1.l, v128.l
// GFX1250: v_cvt_u16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xd2,0xd5,0x80,0x01,0x00,0x00]
@@ -1855,11 +1855,11 @@ v_cvt_u32_f64_e64 v5, -|src_scc|
v_cvt_u32_f64_e64 v255, 0xaf123456 clamp
// GFX1250: v_cvt_u32_f64_e64 v255, 0xaf123456 clamp ; encoding: [0xff,0x80,0x95,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
-v_cvt_u32_u16_e64 v5, v1
-// GFX1250: v_cvt_u32_u16_e64 v5, v1 ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_u32_u16_e64 v5, v1.l
+// GFX1250: v_cvt_u32_u16_e64 v5, v1.l ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_u32_u16_e64 v5, v255
-// GFX1250: v_cvt_u32_u16_e64 v5, v255 ; encoding: [0x05,0x00,0xeb,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_u32_u16_e64 v5, v255.l
+// GFX1250: v_cvt_u32_u16_e64 v5, v255.l ; encoding: [0x05,0x00,0xeb,0xd5,0xff,0x01,0x00,0x00]
v_cvt_u32_u16_e64 v5, s1
// GFX1250: v_cvt_u32_u16_e64 v5, s1 ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x00,0x00,0x00]
@@ -1906,50 +1906,50 @@ v_cvt_u32_u16 v1, v128.l
v_cvt_u32_u16 v1, v128.h
// GFX1250: v_cvt_u32_u16_e64 v1, v128.h op_sel:[1,0] ; encoding: [0x01,0x08,0xeb,0xd5,0x80,0x01,0x00,0x00]
-v_exp_f16_e64 v5, v1
-// GFX1250: v_exp_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x01,0x00,0x00]
+v_exp_f16_e64 v5.l, v1.l
+// GFX1250: v_exp_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x01,0x00,0x00]
-v_exp_f16_e64 v5, v255
-// GFX1250: v_exp_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd8,0xd5,0xff,0x01,0x00,0x00]
+v_exp_f16_e64 v5.l, v255.l
+// GFX1250: v_exp_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd8,0xd5,0xff,0x01,0x00,0x00]
-v_exp_f16_e64 v5, s1
-// GFX1250: v_exp_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, s1
+// GFX1250: v_exp_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x00,0x00,0x00]
-v_exp_f16_e64 v5, s105
-// GFX1250: v_exp_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd8,0xd5,0x69,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, s105
+// GFX1250: v_exp_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd8,0xd5,0x69,0x00,0x00,0x00]
-v_exp_f16_e64 v5, vcc_lo
-// GFX1250: v_exp_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x6a,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, vcc_lo
+// GFX1250: v_exp_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x6a,0x00,0x00,0x00]
-v_exp_f16_e64 v5, vcc_hi
-// GFX1250: v_exp_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x6b,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, vcc_hi
+// GFX1250: v_exp_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x6b,0x00,0x00,0x00]
-v_exp_f16_e64 v5, ttmp15
-// GFX1250: v_exp_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd8,0xd5,0x7b,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, ttmp15
+// GFX1250: v_exp_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd8,0xd5,0x7b,0x00,0x00,0x00]
-v_exp_f16_e64 v5, m0
-// GFX1250: v_exp_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd8,0xd5,0x7d,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, m0
+// GFX1250: v_exp_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd8,0xd5,0x7d,0x00,0x00,0x00]
-v_exp_f16_e64 v5, exec_lo
-// GFX1250: v_exp_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x7e,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, exec_lo
+// GFX1250: v_exp_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x7e,0x00,0x00,0x00]
-v_exp_f16_e64 v5, exec_hi
-// GFX1250: v_exp_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x7f,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, exec_hi
+// GFX1250: v_exp_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x7f,0x00,0x00,0x00]
-v_exp_f16_e64 v5, null
-// GFX1250: v_exp_f16_e64 v5, null ; encoding: [0x05,0x00,0xd8,0xd5,0x7c,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, null
+// GFX1250: v_exp_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd8,0xd5,0x7c,0x00,0x00,0x00]
-v_exp_f16_e64 v5, -1
-// GFX1250: v_exp_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd8,0xd5,0xc1,0x00,0x00,0x00]
+v_exp_f16_e64 v5.l, -1
+// GFX1250: v_exp_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd8,0xd5,0xc1,0x00,0x00,0x00]
-v_exp_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_exp_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd8,0xd5,0xf0,0x00,0x00,0x08]
+v_exp_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_exp_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd8,0xd5,0xf0,0x00,0x00,0x08]
-v_exp_f16_e64 v5, src_scc mul:4
-// GFX1250: v_exp_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd8,0xd5,0xfd,0x00,0x00,0x10]
+v_exp_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_exp_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd8,0xd5,0xfd,0x00,0x00,0x10]
-v_exp_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_exp_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd8,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_exp_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_exp_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd8,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_exp_f16 v1.h, v128.l
// GFX1250: v_exp_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd8,0xd5,0x80,0x01,0x00,0x00]
@@ -2137,50 +2137,50 @@ v_ffbl_b32_e64 v5, src_scc
v_ffbl_b32_e64 v255, 0xaf123456
// GFX1250: v_ctz_i32_b32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xba,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
-v_floor_f16_e64 v5, v1
-// GFX1250: v_floor_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x01,0x00,0x00]
+v_floor_f16_e64 v5.l, v1.l
+// GFX1250: v_floor_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x01,0x00,0x00]
-v_floor_f16_e64 v5, v255
-// GFX1250: v_floor_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdb,0xd5,0xff,0x01,0x00,0x00]
+v_floor_f16_e64 v5.l, v255.l
+// GFX1250: v_floor_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdb,0xd5,0xff,0x01,0x00,0x00]
-v_floor_f16_e64 v5, s1
-// GFX1250: v_floor_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, s1
+// GFX1250: v_floor_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x00,0x00,0x00]
-v_floor_f16_e64 v5, s105
-// GFX1250: v_floor_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdb,0xd5,0x69,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, s105
+// GFX1250: v_floor_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdb,0xd5,0x69,0x00,0x00,0x00]
-v_floor_f16_e64 v5, vcc_lo
-// GFX1250: v_floor_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x6a,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, vcc_lo
+// GFX1250: v_floor_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x6a,0x00,0x00,0x00]
-v_floor_f16_e64 v5, vcc_hi
-// GFX1250: v_floor_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x6b,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, vcc_hi
+// GFX1250: v_floor_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x6b,0x00,0x00,0x00]
-v_floor_f16_e64 v5, ttmp15
-// GFX1250: v_floor_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdb,0xd5,0x7b,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, ttmp15
+// GFX1250: v_floor_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdb,0xd5,0x7b,0x00,0x00,0x00]
-v_floor_f16_e64 v5, m0
-// GFX1250: v_floor_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdb,0xd5,0x7d,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, m0
+// GFX1250: v_floor_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdb,0xd5,0x7d,0x00,0x00,0x00]
-v_floor_f16_e64 v5, exec_lo
-// GFX1250: v_floor_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x7e,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, exec_lo
+// GFX1250: v_floor_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x7e,0x00,0x00,0x00]
-v_floor_f16_e64 v5, exec_hi
-// GFX1250: v_floor_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x7f,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, exec_hi
+// GFX1250: v_floor_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x7f,0x00,0x00,0x00]
-v_floor_f16_e64 v5, null
-// GFX1250: v_floor_f16_e64 v5, null ; encoding: [0x05,0x00,0xdb,0xd5,0x7c,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, null
+// GFX1250: v_floor_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdb,0xd5,0x7c,0x00,0x00,0x00]
-v_floor_f16_e64 v5, -1
-// GFX1250: v_floor_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdb,0xd5,0xc1,0x00,0x00,0x00]
+v_floor_f16_e64 v5.l, -1
+// GFX1250: v_floor_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdb,0xd5,0xc1,0x00,0x00,0x00]
-v_floor_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_floor_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdb,0xd5,0xf0,0x00,0x00,0x08]
+v_floor_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_floor_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdb,0xd5,0xf0,0x00,0x00,0x08]
-v_floor_f16_e64 v5, src_scc mul:4
-// GFX1250: v_floor_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdb,0xd5,0xfd,0x00,0x00,0x10]
+v_floor_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_floor_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdb,0xd5,0xfd,0x00,0x00,0x10]
-v_floor_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_floor_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdb,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_floor_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_floor_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdb,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_floor_f16 v1.h, v128.l
// GFX1250: v_floor_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdb,0xd5,0x80,0x01,0x00,0x00]
@@ -2269,50 +2269,50 @@ v_floor_f64_e64 v[6:7], -|src_scc| mul:4
v_floor_f64_e64 v[254:255], 0xaf123456 clamp div:2
// GFX1250: v_floor_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0x9a,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf]
-v_fract_f16_e64 v5, v1
-// GFX1250: v_fract_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x01,0x00,0x00]
+v_fract_f16_e64 v5.l, v1.l
+// GFX1250: v_fract_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x01,0x00,0x00]
-v_fract_f16_e64 v5, v255
-// GFX1250: v_fract_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdf,0xd5,0xff,0x01,0x00,0x00]
+v_fract_f16_e64 v5.l, v255.l
+// GFX1250: v_fract_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdf,0xd5,0xff,0x01,0x00,0x00]
-v_fract_f16_e64 v5, s1
-// GFX1250: v_fract_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, s1
+// GFX1250: v_fract_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x00,0x00,0x00]
-v_fract_f16_e64 v5, s105
-// GFX1250: v_fract_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdf,0xd5,0x69,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, s105
+// GFX1250: v_fract_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdf,0xd5,0x69,0x00,0x00,0x00]
-v_fract_f16_e64 v5, vcc_lo
-// GFX1250: v_fract_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x6a,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, vcc_lo
+// GFX1250: v_fract_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x6a,0x00,0x00,0x00]
-v_fract_f16_e64 v5, vcc_hi
-// GFX1250: v_fract_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x6b,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, vcc_hi
+// GFX1250: v_fract_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x6b,0x00,0x00,0x00]
-v_fract_f16_e64 v5, ttmp15
-// GFX1250: v_fract_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdf,0xd5,0x7b,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, ttmp15
+// GFX1250: v_fract_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdf,0xd5,0x7b,0x00,0x00,0x00]
-v_fract_f16_e64 v5, m0
-// GFX1250: v_fract_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdf,0xd5,0x7d,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, m0
+// GFX1250: v_fract_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdf,0xd5,0x7d,0x00,0x00,0x00]
-v_fract_f16_e64 v5, exec_lo
-// GFX1250: v_fract_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x7e,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, exec_lo
+// GFX1250: v_fract_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x7e,0x00,0x00,0x00]
-v_fract_f16_e64 v5, exec_hi
-// GFX1250: v_fract_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x7f,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, exec_hi
+// GFX1250: v_fract_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x7f,0x00,0x00,0x00]
-v_fract_f16_e64 v5, null
-// GFX1250: v_fract_f16_e64 v5, null ; encoding: [0x05,0x00,0xdf,0xd5,0x7c,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, null
+// GFX1250: v_fract_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdf,0xd5,0x7c,0x00,0x00,0x00]
-v_fract_f16_e64 v5, -1
-// GFX1250: v_fract_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdf,0xd5,0xc1,0x00,0x00,0x00]
+v_fract_f16_e64 v5.l, -1
+// GFX1250: v_fract_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdf,0xd5,0xc1,0x00,0x00,0x00]
-v_fract_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_fract_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdf,0xd5,0xf0,0x00,0x00,0x08]
+v_fract_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_fract_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdf,0xd5,0xf0,0x00,0x00,0x08]
-v_fract_f16_e64 v5, src_scc mul:4
-// GFX1250: v_fract_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdf,0xd5,0xfd,0x00,0x00,0x10]
+v_fract_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_fract_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdf,0xd5,0xfd,0x00,0x00,0x10]
-v_fract_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_fract_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdf,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_fract_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_fract_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdf,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_fract_f16 v1.h, v128.l
// GFX1250: v_fract_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdf,0xd5,0x80,0x01,0x00,0x00]
@@ -2401,50 +2401,50 @@ v_fract_f64_e64 v[6:7], -|src_scc| mul:4
v_fract_f64_e64 v[254:255], 0xaf123456 clamp div:2
// GFX1250: v_fract_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xbe,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf]
-v_frexp_exp_i16_f16_e64 v5, v1
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x01,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, v1.l
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x01,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, v255
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xda,0xd5,0xff,0x01,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, v255.l
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xda,0xd5,0xff,0x01,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, s1
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, s1
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, s105
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xda,0xd5,0x69,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, s105
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xda,0xd5,0x69,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, vcc_lo
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xda,0xd5,0x6a,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, vcc_lo
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xda,0xd5,0x6a,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, vcc_hi
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xda,0xd5,0x6b,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, vcc_hi
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xda,0xd5,0x6b,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, ttmp15
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xda,0xd5,0x7b,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, ttmp15
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xda,0xd5,0x7b,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, m0
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xda,0xd5,0x7d,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, m0
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xda,0xd5,0x7d,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, exec_lo
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xda,0xd5,0x7e,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, exec_lo
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xda,0xd5,0x7e,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, exec_hi
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xda,0xd5,0x7f,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, exec_hi
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xda,0xd5,0x7f,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, null
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xda,0xd5,0x7c,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, null
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xda,0xd5,0x7c,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, -1
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xda,0xd5,0xc1,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, -1
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xda,0xd5,0xc1,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, 0.5
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xda,0xd5,0xf0,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, 0.5
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xda,0xd5,0xf0,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v5, src_scc
-// GFX1250: v_frexp_exp_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xda,0xd5,0xfd,0x00,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v5.l, src_scc
+// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xda,0xd5,0xfd,0x00,0x00,0x00]
-v_frexp_exp_i16_f16_e64 v255, -|0xfe0b|
-// GFX1250: v_frexp_exp_i16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xda,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
+v_frexp_exp_i16_f16_e64 v255.l, -|0xfe0b|
+// GFX1250: v_frexp_exp_i16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xda,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00]
v_frexp_exp_i16_f16 v1.h, v128.l
// GFX1250: v_frexp_exp_i16_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xda,0xd5,0x80,0x01,0x00,0x00]
@@ -2533,50 +2533,50 @@ v_frexp_exp_i32_f64_e64 v5, -|src_scc|
v_frexp_exp_i32_f64_e64 v255, 0xaf123456
// GFX1250: v_frexp_exp_i32_f64_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xbc,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
-v_frexp_mant_f16_e64 v5, v1
-// GFX1250: v_frexp_mant_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x01,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, v1.l
+// GFX1250: v_frexp_mant_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x01,0x00,0x00]
-v_frexp_mant_f16_e64 v5, v255
-// GFX1250: v_frexp_mant_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd9,0xd5,0xff,0x01,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, v255.l
+// GFX1250: v_frexp_mant_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd9,0xd5,0xff,0x01,0x00,0x00]
-v_frexp_mant_f16_e64 v5, s1
-// GFX1250: v_frexp_mant_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, s1
+// GFX1250: v_frexp_mant_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, s105
-// GFX1250: v_frexp_mant_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd9,0xd5,0x69,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, s105
+// GFX1250: v_frexp_mant_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd9,0xd5,0x69,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, vcc_lo
-// GFX1250: v_frexp_mant_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x6a,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, vcc_lo
+// GFX1250: v_frexp_mant_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x6a,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, vcc_hi
-// GFX1250: v_frexp_mant_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x6b,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, vcc_hi
+// GFX1250: v_frexp_mant_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x6b,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, ttmp15
-// GFX1250: v_frexp_mant_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd9,0xd5,0x7b,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, ttmp15
+// GFX1250: v_frexp_mant_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd9,0xd5,0x7b,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, m0
-// GFX1250: v_frexp_mant_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd9,0xd5,0x7d,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, m0
+// GFX1250: v_frexp_mant_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd9,0xd5,0x7d,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, exec_lo
-// GFX1250: v_frexp_mant_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x7e,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, exec_lo
+// GFX1250: v_frexp_mant_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x7e,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, exec_hi
-// GFX1250: v_frexp_mant_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x7f,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, exec_hi
+// GFX1250: v_frexp_mant_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x7f,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, null
-// GFX1250: v_frexp_mant_f16_e64 v5, null ; encoding: [0x05,0x00,0xd9,0xd5,0x7c,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, null
+// GFX1250: v_frexp_mant_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd9,0xd5,0x7c,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, -1
-// GFX1250: v_frexp_mant_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd9,0xd5,0xc1,0x00,0x00,0x00]
+v_frexp_mant_f16_e64 v5.l, -1
+// GFX1250: v_frexp_mant_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd9,0xd5,0xc1,0x00,0x00,0x00]
-v_frexp_mant_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_frexp_mant_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd9,0xd5,0xf0,0x00,0x00,0x08]
+v_frexp_mant_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_frexp_mant_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd9,0xd5,0xf0,0x00,0x00,0x08]
-v_frexp_mant_f16_e64 v5, src_scc mul:4
-// GFX1250: v_frexp_mant_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd9,0xd5,0xfd,0x00,0x00,0x10]
+v_frexp_mant_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_frexp_mant_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd9,0xd5,0xfd,0x00,0x00,0x10]
-v_frexp_mant_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_frexp_mant_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd9,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_frexp_mant_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_frexp_mant_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd9,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_frexp_mant_f16 v1.h, v128.l
// GFX1250: v_frexp_mant_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd9,0xd5,0x80,0x01,0x00,0x00]
@@ -2665,50 +2665,50 @@ v_frexp_mant_f64_e64 v[6:7], -|src_scc| mul:4
v_frexp_mant_f64_e64 v[254:255], 0xaf123456 clamp div:2
// GFX1250: v_frexp_mant_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xbd,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf]
-v_log_f16_e64 v5, v1
-// GFX1250: v_log_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x01,0x00,0x00]
+v_log_f16_e64 v5.l, v1.l
+// GFX1250: v_log_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x01,0x00,0x00]
-v_log_f16_e64 v5, v255
-// GFX1250: v_log_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd7,0xd5,0xff,0x01,0x00,0x00]
+v_log_f16_e64 v5.l, v255.l
+// GFX1250: v_log_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd7,0xd5,0xff,0x01,0x00,0x00]
-v_log_f16_e64 v5, s1
-// GFX1250: v_log_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, s1
+// GFX1250: v_log_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x00,0x00,0x00]
-v_log_f16_e64 v5, s105
-// GFX1250: v_log_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd7,0xd5,0x69,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, s105
+// GFX1250: v_log_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd7,0xd5,0x69,0x00,0x00,0x00]
-v_log_f16_e64 v5, vcc_lo
-// GFX1250: v_log_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x6a,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, vcc_lo
+// GFX1250: v_log_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x6a,0x00,0x00,0x00]
-v_log_f16_e64 v5, vcc_hi
-// GFX1250: v_log_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x6b,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, vcc_hi
+// GFX1250: v_log_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x6b,0x00,0x00,0x00]
-v_log_f16_e64 v5, ttmp15
-// GFX1250: v_log_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd7,0xd5,0x7b,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, ttmp15
+// GFX1250: v_log_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd7,0xd5,0x7b,0x00,0x00,0x00]
-v_log_f16_e64 v5, m0
-// GFX1250: v_log_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd7,0xd5,0x7d,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, m0
+// GFX1250: v_log_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd7,0xd5,0x7d,0x00,0x00,0x00]
-v_log_f16_e64 v5, exec_lo
-// GFX1250: v_log_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x7e,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, exec_lo
+// GFX1250: v_log_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x7e,0x00,0x00,0x00]
-v_log_f16_e64 v5, exec_hi
-// GFX1250: v_log_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x7f,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, exec_hi
+// GFX1250: v_log_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x7f,0x00,0x00,0x00]
-v_log_f16_e64 v5, null
-// GFX1250: v_log_f16_e64 v5, null ; encoding: [0x05,0x00,0xd7,0xd5,0x7c,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, null
+// GFX1250: v_log_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd7,0xd5,0x7c,0x00,0x00,0x00]
-v_log_f16_e64 v5, -1
-// GFX1250: v_log_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd7,0xd5,0xc1,0x00,0x00,0x00]
+v_log_f16_e64 v5.l, -1
+// GFX1250: v_log_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd7,0xd5,0xc1,0x00,0x00,0x00]
-v_log_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_log_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd7,0xd5,0xf0,0x00,0x00,0x08]
+v_log_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_log_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd7,0xd5,0xf0,0x00,0x00,0x08]
-v_log_f16_e64 v5, src_scc mul:4
-// GFX1250: v_log_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd7,0xd5,0xfd,0x00,0x00,0x10]
+v_log_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_log_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd7,0xd5,0xfd,0x00,0x00,0x10]
-v_log_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_log_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd7,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_log_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_log_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd7,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_log_f16 v1.h, v128.l
// GFX1250: v_log_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd7,0xd5,0x80,0x01,0x00,0x00]
@@ -2872,50 +2872,50 @@ v_movrelsd_b32_e64 v255, v255
v_nop_e64
// GFX1250: v_nop ; encoding: [0x00,0x00,0x80,0xd5,0x00,0x00,0x00,0x00]
-v_not_b16_e64 v5, v1
-// GFX1250: v_not_b16_e64 v5, v1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x01,0x00,0x00]
+v_not_b16_e64 v5.l, v1.l
+// GFX1250: v_not_b16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x01,0x00,0x00]
-v_not_b16_e64 v5, v255
-// GFX1250: v_not_b16_e64 v5, v255 ; encoding: [0x05,0x00,0xe9,0xd5,0xff,0x01,0x00,0x00]
+v_not_b16_e64 v5.l, v255.l
+// GFX1250: v_not_b16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe9,0xd5,0xff,0x01,0x00,0x00]
-v_not_b16_e64 v5, s1
-// GFX1250: v_not_b16_e64 v5, s1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, s1
+// GFX1250: v_not_b16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x00,0x00,0x00]
-v_not_b16_e64 v5, s105
-// GFX1250: v_not_b16_e64 v5, s105 ; encoding: [0x05,0x00,0xe9,0xd5,0x69,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, s105
+// GFX1250: v_not_b16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe9,0xd5,0x69,0x00,0x00,0x00]
-v_not_b16_e64 v5, vcc_lo
-// GFX1250: v_not_b16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x6a,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, vcc_lo
+// GFX1250: v_not_b16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x6a,0x00,0x00,0x00]
-v_not_b16_e64 v5, vcc_hi
-// GFX1250: v_not_b16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x6b,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, vcc_hi
+// GFX1250: v_not_b16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x6b,0x00,0x00,0x00]
-v_not_b16_e64 v5, ttmp15
-// GFX1250: v_not_b16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe9,0xd5,0x7b,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, ttmp15
+// GFX1250: v_not_b16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe9,0xd5,0x7b,0x00,0x00,0x00]
-v_not_b16_e64 v5, m0
-// GFX1250: v_not_b16_e64 v5, m0 ; encoding: [0x05,0x00,0xe9,0xd5,0x7d,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, m0
+// GFX1250: v_not_b16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe9,0xd5,0x7d,0x00,0x00,0x00]
-v_not_b16_e64 v5, exec_lo
-// GFX1250: v_not_b16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x7e,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, exec_lo
+// GFX1250: v_not_b16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x7e,0x00,0x00,0x00]
-v_not_b16_e64 v5, exec_hi
-// GFX1250: v_not_b16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x7f,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, exec_hi
+// GFX1250: v_not_b16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x7f,0x00,0x00,0x00]
-v_not_b16_e64 v5, null
-// GFX1250: v_not_b16_e64 v5, null ; encoding: [0x05,0x00,0xe9,0xd5,0x7c,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, null
+// GFX1250: v_not_b16_e64 v5.l, null ; encoding: [0x05,0x00,0xe9,0xd5,0x7c,0x00,0x00,0x00]
-v_not_b16_e64 v5, -1
-// GFX1250: v_not_b16_e64 v5, -1 ; encoding: [0x05,0x00,0xe9,0xd5,0xc1,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, -1
+// GFX1250: v_not_b16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe9,0xd5,0xc1,0x00,0x00,0x00]
-v_not_b16_e64 v5, 0.5
-// GFX1250: v_not_b16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe9,0xd5,0xf0,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, 0.5
+// GFX1250: v_not_b16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe9,0xd5,0xf0,0x00,0x00,0x00]
-v_not_b16_e64 v5, src_scc
-// GFX1250: v_not_b16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe9,0xd5,0xfd,0x00,0x00,0x00]
+v_not_b16_e64 v5.l, src_scc
+// GFX1250: v_not_b16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe9,0xd5,0xfd,0x00,0x00,0x00]
-v_not_b16_e64 v255, 0xfe0b
-// GFX1250: v_not_b16_e64 v255, 0xfe0b ; encoding: [0xff,0x00,0xe9,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00]
+v_not_b16_e64 v255.l, 0xfe0b
+// GFX1250: v_not_b16_e64 v255.l, 0xfe0b ; encoding: [0xff,0x00,0xe9,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00]
v_not_b16 v1.h, v128.l
// GFX1250: v_not_b16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xe9,0xd5,0x80,0x01,0x00,0x00]
@@ -2971,50 +2971,50 @@ v_not_b32_e64 v255, 0xaf123456
v_pipeflush_e64
// GFX1250: v_pipeflush ; encoding: [0x00,0x00,0x9b,0xd5,0x00,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, v1
-// GFX1250: v_rcp_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x01,0x00,0x00]
+v_rcp_f16_e64 v5.l, v1.l
+// GFX1250: v_rcp_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x01,0x00,0x00]
-v_rcp_f16_e64 v5, v255
-// GFX1250: v_rcp_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd4,0xd5,0xff,0x01,0x00,0x00]
+v_rcp_f16_e64 v5.l, v255.l
+// GFX1250: v_rcp_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd4,0xd5,0xff,0x01,0x00,0x00]
-v_rcp_f16_e64 v5, s1
-// GFX1250: v_rcp_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, s1
+// GFX1250: v_rcp_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, s105
-// GFX1250: v_rcp_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd4,0xd5,0x69,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, s105
+// GFX1250: v_rcp_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd4,0xd5,0x69,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, vcc_lo
-// GFX1250: v_rcp_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x6a,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, vcc_lo
+// GFX1250: v_rcp_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x6a,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, vcc_hi
-// GFX1250: v_rcp_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x6b,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, vcc_hi
+// GFX1250: v_rcp_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x6b,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, ttmp15
-// GFX1250: v_rcp_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd4,0xd5,0x7b,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, ttmp15
+// GFX1250: v_rcp_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd4,0xd5,0x7b,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, m0
-// GFX1250: v_rcp_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd4,0xd5,0x7d,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, m0
+// GFX1250: v_rcp_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd4,0xd5,0x7d,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, exec_lo
-// GFX1250: v_rcp_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x7e,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, exec_lo
+// GFX1250: v_rcp_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x7e,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, exec_hi
-// GFX1250: v_rcp_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x7f,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, exec_hi
+// GFX1250: v_rcp_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x7f,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, null
-// GFX1250: v_rcp_f16_e64 v5, null ; encoding: [0x05,0x00,0xd4,0xd5,0x7c,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, null
+// GFX1250: v_rcp_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd4,0xd5,0x7c,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, -1
-// GFX1250: v_rcp_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd4,0xd5,0xc1,0x00,0x00,0x00]
+v_rcp_f16_e64 v5.l, -1
+// GFX1250: v_rcp_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd4,0xd5,0xc1,0x00,0x00,0x00]
-v_rcp_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_rcp_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd4,0xd5,0xf0,0x00,0x00,0x08]
+v_rcp_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_rcp_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd4,0xd5,0xf0,0x00,0x00,0x08]
-v_rcp_f16_e64 v5, src_scc mul:4
-// GFX1250: v_rcp_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd4,0xd5,0xfd,0x00,0x00,0x10]
+v_rcp_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_rcp_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd4,0xd5,0xfd,0x00,0x00,0x10]
-v_rcp_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_rcp_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd4,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_rcp_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_rcp_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd4,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_rcp_f16 v1.h, v128.l
// GFX1250: v_rcp_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd4,0xd5,0x80,0x01,0x00,0x00]
@@ -3148,50 +3148,50 @@ v_rcp_iflag_f32_e64 v5, src_scc mul:4
v_rcp_iflag_f32_e64 v255, -|0xaf123456| clamp div:2
// GFX1250: v_rcp_iflag_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0xab,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf]
-v_rndne_f16_e64 v5, v1
-// GFX1250: v_rndne_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x01,0x00,0x00]
+v_rndne_f16_e64 v5.l, v1.l
+// GFX1250: v_rndne_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x01,0x00,0x00]
-v_rndne_f16_e64 v5, v255
-// GFX1250: v_rndne_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xde,0xd5,0xff,0x01,0x00,0x00]
+v_rndne_f16_e64 v5.l, v255.l
+// GFX1250: v_rndne_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xde,0xd5,0xff,0x01,0x00,0x00]
-v_rndne_f16_e64 v5, s1
-// GFX1250: v_rndne_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, s1
+// GFX1250: v_rndne_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, s105
-// GFX1250: v_rndne_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xde,0xd5,0x69,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, s105
+// GFX1250: v_rndne_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xde,0xd5,0x69,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, vcc_lo
-// GFX1250: v_rndne_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xde,0xd5,0x6a,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, vcc_lo
+// GFX1250: v_rndne_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xde,0xd5,0x6a,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, vcc_hi
-// GFX1250: v_rndne_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xde,0xd5,0x6b,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, vcc_hi
+// GFX1250: v_rndne_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xde,0xd5,0x6b,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, ttmp15
-// GFX1250: v_rndne_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xde,0xd5,0x7b,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, ttmp15
+// GFX1250: v_rndne_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xde,0xd5,0x7b,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, m0
-// GFX1250: v_rndne_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xde,0xd5,0x7d,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, m0
+// GFX1250: v_rndne_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xde,0xd5,0x7d,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, exec_lo
-// GFX1250: v_rndne_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xde,0xd5,0x7e,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, exec_lo
+// GFX1250: v_rndne_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xde,0xd5,0x7e,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, exec_hi
-// GFX1250: v_rndne_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xde,0xd5,0x7f,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, exec_hi
+// GFX1250: v_rndne_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xde,0xd5,0x7f,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, null
-// GFX1250: v_rndne_f16_e64 v5, null ; encoding: [0x05,0x00,0xde,0xd5,0x7c,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, null
+// GFX1250: v_rndne_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xde,0xd5,0x7c,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, -1
-// GFX1250: v_rndne_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xde,0xd5,0xc1,0x00,0x00,0x00]
+v_rndne_f16_e64 v5.l, -1
+// GFX1250: v_rndne_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xde,0xd5,0xc1,0x00,0x00,0x00]
-v_rndne_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_rndne_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xde,0xd5,0xf0,0x00,0x00,0x08]
+v_rndne_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_rndne_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xde,0xd5,0xf0,0x00,0x00,0x08]
-v_rndne_f16_e64 v5, src_scc mul:4
-// GFX1250: v_rndne_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xde,0xd5,0xfd,0x00,0x00,0x10]
+v_rndne_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_rndne_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xde,0xd5,0xfd,0x00,0x00,0x10]
-v_rndne_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_rndne_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xde,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_rndne_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_rndne_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xde,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_rndne_f16 v1.h, v128.l
// GFX1250: v_rndne_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xde,0xd5,0x80,0x01,0x00,0x00]
@@ -3280,50 +3280,50 @@ v_rndne_f64_e64 v[6:7], -|src_scc| mul:4
v_rndne_f64_e64 v[254:255], 0xaf123456 clamp div:2
// GFX1250: v_rndne_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0x99,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf]
-v_rsq_f16_e64 v5, v1
-// GFX1250: v_rsq_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x01,0x00,0x00]
+v_rsq_f16_e64 v5.l, v1.l
+// GFX1250: v_rsq_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x01,0x00,0x00]
-v_rsq_f16_e64 v5, v255
-// GFX1250: v_rsq_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd6,0xd5,0xff,0x01,0x00,0x00]
+v_rsq_f16_e64 v5.l, v255.l
+// GFX1250: v_rsq_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd6,0xd5,0xff,0x01,0x00,0x00]
-v_rsq_f16_e64 v5, s1
-// GFX1250: v_rsq_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, s1
+// GFX1250: v_rsq_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, s105
-// GFX1250: v_rsq_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd6,0xd5,0x69,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, s105
+// GFX1250: v_rsq_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd6,0xd5,0x69,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, vcc_lo
-// GFX1250: v_rsq_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x6a,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, vcc_lo
+// GFX1250: v_rsq_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x6a,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, vcc_hi
-// GFX1250: v_rsq_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x6b,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, vcc_hi
+// GFX1250: v_rsq_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x6b,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, ttmp15
-// GFX1250: v_rsq_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd6,0xd5,0x7b,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, ttmp15
+// GFX1250: v_rsq_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd6,0xd5,0x7b,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, m0
-// GFX1250: v_rsq_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd6,0xd5,0x7d,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, m0
+// GFX1250: v_rsq_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd6,0xd5,0x7d,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, exec_lo
-// GFX1250: v_rsq_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x7e,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, exec_lo
+// GFX1250: v_rsq_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x7e,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, exec_hi
-// GFX1250: v_rsq_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x7f,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, exec_hi
+// GFX1250: v_rsq_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x7f,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, null
-// GFX1250: v_rsq_f16_e64 v5, null ; encoding: [0x05,0x00,0xd6,0xd5,0x7c,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, null
+// GFX1250: v_rsq_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd6,0xd5,0x7c,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, -1
-// GFX1250: v_rsq_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd6,0xd5,0xc1,0x00,0x00,0x00]
+v_rsq_f16_e64 v5.l, -1
+// GFX1250: v_rsq_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd6,0xd5,0xc1,0x00,0x00,0x00]
-v_rsq_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_rsq_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd6,0xd5,0xf0,0x00,0x00,0x08]
+v_rsq_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_rsq_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd6,0xd5,0xf0,0x00,0x00,0x08]
-v_rsq_f16_e64 v5, src_scc mul:4
-// GFX1250: v_rsq_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd6,0xd5,0xfd,0x00,0x00,0x10]
+v_rsq_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_rsq_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd6,0xd5,0xfd,0x00,0x00,0x10]
-v_rsq_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_rsq_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd6,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_rsq_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_rsq_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd6,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_rsq_f16 v1.h, v128.l
// GFX1250: v_rsq_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd6,0xd5,0x80,0x01,0x00,0x00]
@@ -3412,50 +3412,50 @@ v_rsq_f64_e64 v[6:7], -|src_scc| mul:4
v_rsq_f64_e64 v[254:255], 0xaf123456 clamp div:2
// GFX1250: v_rsq_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xb1,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf]
-v_sat_pk_u8_i16_e64 v5, v1
-// GFX1250: v_sat_pk_u8_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, v1
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, v1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, v255
-// GFX1250: v_sat_pk_u8_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xe2,0xd5,0xff,0x01,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, v255
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, v255 ; encoding: [0x05,0x00,0xe2,0xd5,0xff,0x01,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, s1
-// GFX1250: v_sat_pk_u8_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, s1
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, s105
-// GFX1250: v_sat_pk_u8_i16_e64 v5, s105 ; encoding: [0x05,0x00,0xe2,0xd5,0x69,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, s105
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe2,0xd5,0x69,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, vcc_lo
-// GFX1250: v_sat_pk_u8_i16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x6a,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, vcc_lo
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x6a,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, vcc_hi
-// GFX1250: v_sat_pk_u8_i16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x6b,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, vcc_hi
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x6b,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, ttmp15
-// GFX1250: v_sat_pk_u8_i16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe2,0xd5,0x7b,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, ttmp15
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe2,0xd5,0x7b,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, m0
-// GFX1250: v_sat_pk_u8_i16_e64 v5, m0 ; encoding: [0x05,0x00,0xe2,0xd5,0x7d,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, m0
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe2,0xd5,0x7d,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, exec_lo
-// GFX1250: v_sat_pk_u8_i16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x7e,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, exec_lo
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x7e,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, exec_hi
-// GFX1250: v_sat_pk_u8_i16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x7f,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, exec_hi
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x7f,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, null
-// GFX1250: v_sat_pk_u8_i16_e64 v5, null ; encoding: [0x05,0x00,0xe2,0xd5,0x7c,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, null
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, null ; encoding: [0x05,0x00,0xe2,0xd5,0x7c,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, -1
-// GFX1250: v_sat_pk_u8_i16_e64 v5, -1 ; encoding: [0x05,0x00,0xe2,0xd5,0xc1,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, -1
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe2,0xd5,0xc1,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, 0.5
-// GFX1250: v_sat_pk_u8_i16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe2,0xd5,0xf0,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, 0.5
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe2,0xd5,0xf0,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v5, src_scc
-// GFX1250: v_sat_pk_u8_i16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe2,0xd5,0xfd,0x00,0x00,0x00]
+v_sat_pk_u8_i16_e64 v5.l, src_scc
+// GFX1250: v_sat_pk_u8_i16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe2,0xd5,0xfd,0x00,0x00,0x00]
-v_sat_pk_u8_i16_e64 v255, 0xfe0b
-// GFX1250: v_sat_pk_u8_i16_e64 v255, 0xfe0b ; encoding: [0xff,0x00,0xe2,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00]
+v_sat_pk_u8_i16_e64 v255.l, 0xfe0b
+// GFX1250: v_sat_pk_u8_i16_e64 v255.l, 0xfe0b ; encoding: [0xff,0x00,0xe2,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00]
v_sat_pk_u8_i16 v128.l, v1
// GFX1250: v_sat_pk_u8_i16_e64 v128.l, v1 ; encoding: [0x80,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00]
@@ -3463,50 +3463,50 @@ v_sat_pk_u8_i16 v128.l, v1
v_sat_pk_u8_i16 v128.h, v1
// GFX1250: v_sat_pk_u8_i16_e64 v128.h, v1 op_sel:[0,1] ; encoding: [0x80,0x40,0xe2,0xd5,0x01,0x01,0x00,0x00]
-v_sin_f16_e64 v5, v1
-// GFX1250: v_sin_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x01,0x00,0x00]
+v_sin_f16_e64 v5.l, v1.l
+// GFX1250: v_sin_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x01,0x00,0x00]
-v_sin_f16_e64 v5, v255
-// GFX1250: v_sin_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe0,0xd5,0xff,0x01,0x00,0x00]
+v_sin_f16_e64 v5.l, v255.l
+// GFX1250: v_sin_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe0,0xd5,0xff,0x01,0x00,0x00]
-v_sin_f16_e64 v5, s1
-// GFX1250: v_sin_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, s1
+// GFX1250: v_sin_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x00,0x00,0x00]
-v_sin_f16_e64 v5, s105
-// GFX1250: v_sin_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe0,0xd5,0x69,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, s105
+// GFX1250: v_sin_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe0,0xd5,0x69,0x00,0x00,0x00]
-v_sin_f16_e64 v5, vcc_lo
-// GFX1250: v_sin_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x6a,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, vcc_lo
+// GFX1250: v_sin_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x6a,0x00,0x00,0x00]
-v_sin_f16_e64 v5, vcc_hi
-// GFX1250: v_sin_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x6b,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, vcc_hi
+// GFX1250: v_sin_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x6b,0x00,0x00,0x00]
-v_sin_f16_e64 v5, ttmp15
-// GFX1250: v_sin_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe0,0xd5,0x7b,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, ttmp15
+// GFX1250: v_sin_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe0,0xd5,0x7b,0x00,0x00,0x00]
-v_sin_f16_e64 v5, m0
-// GFX1250: v_sin_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe0,0xd5,0x7d,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, m0
+// GFX1250: v_sin_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe0,0xd5,0x7d,0x00,0x00,0x00]
-v_sin_f16_e64 v5, exec_lo
-// GFX1250: v_sin_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x7e,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, exec_lo
+// GFX1250: v_sin_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x7e,0x00,0x00,0x00]
-v_sin_f16_e64 v5, exec_hi
-// GFX1250: v_sin_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x7f,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, exec_hi
+// GFX1250: v_sin_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x7f,0x00,0x00,0x00]
-v_sin_f16_e64 v5, null
-// GFX1250: v_sin_f16_e64 v5, null ; encoding: [0x05,0x00,0xe0,0xd5,0x7c,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, null
+// GFX1250: v_sin_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe0,0xd5,0x7c,0x00,0x00,0x00]
-v_sin_f16_e64 v5, -1
-// GFX1250: v_sin_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe0,0xd5,0xc1,0x00,0x00,0x00]
+v_sin_f16_e64 v5.l, -1
+// GFX1250: v_sin_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe0,0xd5,0xc1,0x00,0x00,0x00]
-v_sin_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_sin_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xe0,0xd5,0xf0,0x00,0x00,0x08]
+v_sin_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_sin_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xe0,0xd5,0xf0,0x00,0x00,0x08]
-v_sin_f16_e64 v5, src_scc mul:4
-// GFX1250: v_sin_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xe0,0xd5,0xfd,0x00,0x00,0x10]
+v_sin_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_sin_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xe0,0xd5,0xfd,0x00,0x00,0x10]
-v_sin_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_sin_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe0,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_sin_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_sin_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe0,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_sin_f16 v1.h, v128.l
// GFX1250: v_sin_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xe0,0xd5,0x80,0x01,0x00,0x00]
@@ -3559,50 +3559,50 @@ v_sin_f32_e64 v5, src_scc mul:4
v_sin_f32_e64 v255, -|0xaf123456| clamp div:2
// GFX1250: v_sin_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0xb5,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf]
-v_sqrt_f16_e64 v5, v1
-// GFX1250: v_sqrt_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x01,0x00,0x00]
+v_sqrt_f16_e64 v5.l, v1.l
+// GFX1250: v_sqrt_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x01,0x00,0x00]
-v_sqrt_f16_e64 v5, v255
-// GFX1250: v_sqrt_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd5,0xd5,0xff,0x01,0x00,0x00]
+v_sqrt_f16_e64 v5.l, v255.l
+// GFX1250: v_sqrt_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd5,0xd5,0xff,0x01,0x00,0x00]
-v_sqrt_f16_e64 v5, s1
-// GFX1250: v_sqrt_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, s1
+// GFX1250: v_sqrt_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, s105
-// GFX1250: v_sqrt_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd5,0xd5,0x69,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, s105
+// GFX1250: v_sqrt_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd5,0xd5,0x69,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, vcc_lo
-// GFX1250: v_sqrt_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x6a,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, vcc_lo
+// GFX1250: v_sqrt_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x6a,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, vcc_hi
-// GFX1250: v_sqrt_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x6b,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, vcc_hi
+// GFX1250: v_sqrt_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x6b,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, ttmp15
-// GFX1250: v_sqrt_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd5,0xd5,0x7b,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, ttmp15
+// GFX1250: v_sqrt_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd5,0xd5,0x7b,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, m0
-// GFX1250: v_sqrt_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd5,0xd5,0x7d,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, m0
+// GFX1250: v_sqrt_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd5,0xd5,0x7d,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, exec_lo
-// GFX1250: v_sqrt_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x7e,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, exec_lo
+// GFX1250: v_sqrt_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x7e,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, exec_hi
-// GFX1250: v_sqrt_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x7f,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, exec_hi
+// GFX1250: v_sqrt_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x7f,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, null
-// GFX1250: v_sqrt_f16_e64 v5, null ; encoding: [0x05,0x00,0xd5,0xd5,0x7c,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, null
+// GFX1250: v_sqrt_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd5,0xd5,0x7c,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, -1
-// GFX1250: v_sqrt_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd5,0xd5,0xc1,0x00,0x00,0x00]
+v_sqrt_f16_e64 v5.l, -1
+// GFX1250: v_sqrt_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd5,0xd5,0xc1,0x00,0x00,0x00]
-v_sqrt_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_sqrt_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd5,0xd5,0xf0,0x00,0x00,0x08]
+v_sqrt_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_sqrt_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd5,0xd5,0xf0,0x00,0x00,0x08]
-v_sqrt_f16_e64 v5, src_scc mul:4
-// GFX1250: v_sqrt_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd5,0xd5,0xfd,0x00,0x00,0x10]
+v_sqrt_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_sqrt_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd5,0xd5,0xfd,0x00,0x00,0x10]
-v_sqrt_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_sqrt_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd5,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_sqrt_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_sqrt_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd5,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_sqrt_f16 v1.h, v128.l
// GFX1250: v_sqrt_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd5,0xd5,0x80,0x01,0x00,0x00]
@@ -3691,50 +3691,50 @@ v_sqrt_f64_e64 v[6:7], -|src_scc| mul:4
v_sqrt_f64_e64 v[254:255], 0xaf123456 clamp div:2
// GFX1250: v_sqrt_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xb4,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf]
-v_trunc_f16_e64 v5, v1
-// GFX1250: v_trunc_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x01,0x00,0x00]
+v_trunc_f16_e64 v5.l, v1.l
+// GFX1250: v_trunc_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x01,0x00,0x00]
-v_trunc_f16_e64 v5, v255
-// GFX1250: v_trunc_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdd,0xd5,0xff,0x01,0x00,0x00]
+v_trunc_f16_e64 v5.l, v255.l
+// GFX1250: v_trunc_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdd,0xd5,0xff,0x01,0x00,0x00]
-v_trunc_f16_e64 v5, s1
-// GFX1250: v_trunc_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, s1
+// GFX1250: v_trunc_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, s105
-// GFX1250: v_trunc_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdd,0xd5,0x69,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, s105
+// GFX1250: v_trunc_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdd,0xd5,0x69,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, vcc_lo
-// GFX1250: v_trunc_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x6a,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, vcc_lo
+// GFX1250: v_trunc_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x6a,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, vcc_hi
-// GFX1250: v_trunc_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x6b,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, vcc_hi
+// GFX1250: v_trunc_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x6b,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, ttmp15
-// GFX1250: v_trunc_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdd,0xd5,0x7b,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, ttmp15
+// GFX1250: v_trunc_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdd,0xd5,0x7b,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, m0
-// GFX1250: v_trunc_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdd,0xd5,0x7d,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, m0
+// GFX1250: v_trunc_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdd,0xd5,0x7d,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, exec_lo
-// GFX1250: v_trunc_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x7e,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, exec_lo
+// GFX1250: v_trunc_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x7e,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, exec_hi
-// GFX1250: v_trunc_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x7f,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, exec_hi
+// GFX1250: v_trunc_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x7f,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, null
-// GFX1250: v_trunc_f16_e64 v5, null ; encoding: [0x05,0x00,0xdd,0xd5,0x7c,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, null
+// GFX1250: v_trunc_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdd,0xd5,0x7c,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, -1
-// GFX1250: v_trunc_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdd,0xd5,0xc1,0x00,0x00,0x00]
+v_trunc_f16_e64 v5.l, -1
+// GFX1250: v_trunc_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdd,0xd5,0xc1,0x00,0x00,0x00]
-v_trunc_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_trunc_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdd,0xd5,0xf0,0x00,0x00,0x08]
+v_trunc_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_trunc_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdd,0xd5,0xf0,0x00,0x00,0x08]
-v_trunc_f16_e64 v5, src_scc mul:4
-// GFX1250: v_trunc_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdd,0xd5,0xfd,0x00,0x00,0x10]
+v_trunc_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_trunc_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdd,0xd5,0xfd,0x00,0x00,0x10]
-v_trunc_f16_e64 v255, -|0xfe0b| clamp div:2
-// GFX1250: v_trunc_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdd,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
+v_trunc_f16_e64 v255.l, -|0xfe0b| clamp div:2
+// GFX1250: v_trunc_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdd,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00]
v_trunc_f16 v1.h, v128.l
// GFX1250: v_trunc_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdd,0xd5,0x80,0x01,0x00,0x00]
@@ -3868,98 +3868,98 @@ v_tanh_f32_e64 v5, src_scc mul:4
v_tanh_f32_e64 v255, -|0xaf123456| clamp div:2
// GFX1250: v_tanh_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x9e,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf]
-v_tanh_f16_e64 v5, v1
-// GFX1250: v_tanh_f16_e64 v5, v1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x01,0x00,0x00]
+v_tanh_f16_e64 v5.l, v1.l
+// GFX1250: v_tanh_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x01,0x00,0x00]
-v_tanh_f16_e64 v5, v255
-// GFX1250: v_tanh_f16_e64 v5, v255 ; encoding: [0x05,0x00,0x9f,0xd5,0xff,0x01,0x00,0x00]
+v_tanh_f16_e64 v5.l, v255.l
+// GFX1250: v_tanh_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0x9f,0xd5,0xff,0x01,0x00,0x00]
-v_tanh_f16_e64 v5, s1
-// GFX1250: v_tanh_f16_e64 v5, s1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, s1
+// GFX1250: v_tanh_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, s105
-// GFX1250: v_tanh_f16_e64 v5, s105 ; encoding: [0x05,0x00,0x9f,0xd5,0x69,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, s105
+// GFX1250: v_tanh_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0x9f,0xd5,0x69,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, vcc_lo
-// GFX1250: v_tanh_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x6a,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, vcc_lo
+// GFX1250: v_tanh_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x6a,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, vcc_hi
-// GFX1250: v_tanh_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x6b,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, vcc_hi
+// GFX1250: v_tanh_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x6b,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, ttmp15
-// GFX1250: v_tanh_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0x9f,0xd5,0x7b,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, ttmp15
+// GFX1250: v_tanh_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0x9f,0xd5,0x7b,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, m0
-// GFX1250: v_tanh_f16_e64 v5, m0 ; encoding: [0x05,0x00,0x9f,0xd5,0x7d,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, m0
+// GFX1250: v_tanh_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0x9f,0xd5,0x7d,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, exec_lo
-// GFX1250: v_tanh_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x7e,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, exec_lo
+// GFX1250: v_tanh_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x7e,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, exec_hi
-// GFX1250: v_tanh_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x7f,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, exec_hi
+// GFX1250: v_tanh_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x7f,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, null
-// GFX1250: v_tanh_f16_e64 v5, null ; encoding: [0x05,0x00,0x9f,0xd5,0x7c,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, null
+// GFX1250: v_tanh_f16_e64 v5.l, null ; encoding: [0x05,0x00,0x9f,0xd5,0x7c,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, -1
-// GFX1250: v_tanh_f16_e64 v5, -1 ; encoding: [0x05,0x00,0x9f,0xd5,0xc1,0x00,0x00,0x00]
+v_tanh_f16_e64 v5.l, -1
+// GFX1250: v_tanh_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0x9f,0xd5,0xc1,0x00,0x00,0x00]
-v_tanh_f16_e64 v5, 0.5 mul:2
-// GFX1250: v_tanh_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0x9f,0xd5,0xf0,0x00,0x00,0x08]
+v_tanh_f16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_tanh_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0x9f,0xd5,0xf0,0x00,0x00,0x08]
-v_tanh_f16_e64 v5, src_scc mul:4
-// GFX1250: v_tanh_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0x9f,0xd5,0xfd,0x00,0x00,0x10]
+v_tanh_f16_e64 v5.l, src_scc mul:4
+// GFX1250: v_tanh_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0x9f,0xd5,0xfd,0x00,0x00,0x10]
-v_tanh_f16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_tanh_f16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0x9f,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_tanh_f16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_tanh_f16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0x9f,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_tanh_f16 v5.l, v128.h
// GFX1250: v_tanh_f16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0x9f,0xd5,0x80,0x01,0x00,0x00]
-v_tanh_bf16_e64 v5, v1
-// GFX1250: v_tanh_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x01,0x00,0x00]
+v_tanh_bf16_e64 v5.l, v1.l
+// GFX1250: v_tanh_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x01,0x00,0x00]
-v_tanh_bf16_e64 v5, v255
-// GFX1250: v_tanh_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xca,0xd5,0xff,0x01,0x00,0x00]
+v_tanh_bf16_e64 v5.l, v255.l
+// GFX1250: v_tanh_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xca,0xd5,0xff,0x01,0x00,0x00]
-v_tanh_bf16_e64 v5, s1
-// GFX1250: v_tanh_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, s1
+// GFX1250: v_tanh_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, s105
-// GFX1250: v_tanh_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, s105
+// GFX1250: v_tanh_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, vcc_lo
-// GFX1250: v_tanh_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xca,0xd5,0x6a,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, vcc_lo
+// GFX1250: v_tanh_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xca,0xd5,0x6a,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, vcc_hi
-// GFX1250: v_tanh_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xca,0xd5,0x6b,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, vcc_hi
+// GFX1250: v_tanh_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xca,0xd5,0x6b,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, ttmp15
-// GFX1250: v_tanh_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, ttmp15
+// GFX1250: v_tanh_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, m0
-// GFX1250: v_tanh_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xca,0xd5,0x7d,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, m0
+// GFX1250: v_tanh_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xca,0xd5,0x7d,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, exec_lo
-// GFX1250: v_tanh_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xca,0xd5,0x7e,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, exec_lo
+// GFX1250: v_tanh_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xca,0xd5,0x7e,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, exec_hi
-// GFX1250: v_tanh_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, exec_hi
+// GFX1250: v_tanh_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, null
-// GFX1250: v_tanh_bf16_e64 v5, null ; encoding: [0x05,0x00,0xca,0xd5,0x7c,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, null
+// GFX1250: v_tanh_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xca,0xd5,0x7c,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, -1
-// GFX1250: v_tanh_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
+v_tanh_bf16_e64 v5.l, -1
+// GFX1250: v_tanh_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_tanh_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
+v_tanh_bf16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_tanh_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-v_tanh_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_tanh_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
+v_tanh_bf16_e64 v5.l, src_scc mul:4
+// GFX1250: v_tanh_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-v_tanh_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_tanh_bf16 v5.l, v128.h
// GFX1250: v_tanh_bf16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0xca,0xd5,0x80,0x01,0x00,0x00]
@@ -4000,347 +4000,347 @@ v_prng_b32_e64 v5, null
v_prng_b32_e64 v5, -1
// GFX1250: v_prng_b32_e64 v5, -1 ; encoding: [0x05,0x00,0xcb,0xd5,0xc1,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, v1
-// GFX1250: v_rcp_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x01,0x00,0x00]
+v_rcp_bf16_e64 v5.l, v1.l
+// GFX1250: v_rcp_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x01,0x00,0x00]
-v_rcp_bf16_e64 v5, v255
-// GFX1250: v_rcp_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xf9,0xd5,0xff,0x01,0x00,0x00]
+v_rcp_bf16_e64 v5.l, v255.l
+// GFX1250: v_rcp_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xf9,0xd5,0xff,0x01,0x00,0x00]
-v_rcp_bf16_e64 v5, s1
-// GFX1250: v_rcp_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, s1
+// GFX1250: v_rcp_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, s105
-// GFX1250: v_rcp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, s105
+// GFX1250: v_rcp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, vcc_lo
-// GFX1250: v_rcp_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x6a,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, vcc_lo
+// GFX1250: v_rcp_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x6a,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, vcc_hi
-// GFX1250: v_rcp_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x6b,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, vcc_hi
+// GFX1250: v_rcp_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x6b,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, ttmp15
-// GFX1250: v_rcp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, ttmp15
+// GFX1250: v_rcp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, m0
-// GFX1250: v_rcp_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xf9,0xd5,0x7d,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, m0
+// GFX1250: v_rcp_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xf9,0xd5,0x7d,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, exec_lo
-// GFX1250: v_rcp_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x7e,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, exec_lo
+// GFX1250: v_rcp_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x7e,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, exec_hi
-// GFX1250: v_rcp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, exec_hi
+// GFX1250: v_rcp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, null
-// GFX1250: v_rcp_bf16_e64 v5, null ; encoding: [0x05,0x00,0xf9,0xd5,0x7c,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, null
+// GFX1250: v_rcp_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xf9,0xd5,0x7c,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, -1
-// GFX1250: v_rcp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
+v_rcp_bf16_e64 v5.l, -1
+// GFX1250: v_rcp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_rcp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
+v_rcp_bf16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_rcp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-v_rcp_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_rcp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
+v_rcp_bf16_e64 v5.l, src_scc mul:4
+// GFX1250: v_rcp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-v_rcp_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_rcp_bf16 v5.h, v128.h
// GFX1250: v_rcp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xf9,0xd5,0x80,0x01,0x00,0x00]
-v_sqrt_bf16_e64 v5, v1
-// GFX1250: v_sqrt_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x01,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, v1.l
+// GFX1250: v_sqrt_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x01,0x00,0x00]
-v_sqrt_bf16_e64 v5, v255
-// GFX1250: v_sqrt_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfa,0xd5,0xff,0x01,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, v255.l
+// GFX1250: v_sqrt_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfa,0xd5,0xff,0x01,0x00,0x00]
-v_sqrt_bf16_e64 v5, s1
-// GFX1250: v_sqrt_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, s1
+// GFX1250: v_sqrt_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, s105
-// GFX1250: v_sqrt_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, s105
+// GFX1250: v_sqrt_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, vcc_lo
-// GFX1250: v_sqrt_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x6a,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, vcc_lo
+// GFX1250: v_sqrt_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x6a,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, vcc_hi
-// GFX1250: v_sqrt_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x6b,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, vcc_hi
+// GFX1250: v_sqrt_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x6b,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, ttmp15
-// GFX1250: v_sqrt_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, ttmp15
+// GFX1250: v_sqrt_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, m0
-// GFX1250: v_sqrt_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfa,0xd5,0x7d,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, m0
+// GFX1250: v_sqrt_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfa,0xd5,0x7d,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, exec_lo
-// GFX1250: v_sqrt_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x7e,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, exec_lo
+// GFX1250: v_sqrt_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x7e,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, exec_hi
-// GFX1250: v_sqrt_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, exec_hi
+// GFX1250: v_sqrt_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, null
-// GFX1250: v_sqrt_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfa,0xd5,0x7c,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, null
+// GFX1250: v_sqrt_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfa,0xd5,0x7c,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, -1
-// GFX1250: v_sqrt_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
+v_sqrt_bf16_e64 v5.l, -1
+// GFX1250: v_sqrt_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_sqrt_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
+v_sqrt_bf16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_sqrt_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-v_sqrt_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_sqrt_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
+v_sqrt_bf16_e64 v5.l, src_scc mul:4
+// GFX1250: v_sqrt_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_sqrt_bf16 v5.h, v128.h
// GFX1250: v_sqrt_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfa,0xd5,0x80,0x01,0x00,0x00]
-v_rsq_bf16_e64 v5, v1
-// GFX1250: v_rsq_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x01,0x00,0x00]
+v_rsq_bf16_e64 v5.l, v1.l
+// GFX1250: v_rsq_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x01,0x00,0x00]
-v_rsq_bf16_e64 v5, v255
-// GFX1250: v_rsq_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfb,0xd5,0xff,0x01,0x00,0x00]
+v_rsq_bf16_e64 v5.l, v255.l
+// GFX1250: v_rsq_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfb,0xd5,0xff,0x01,0x00,0x00]
-v_rsq_bf16_e64 v5, s1
-// GFX1250: v_rsq_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, s1
+// GFX1250: v_rsq_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, s105
-// GFX1250: v_rsq_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, s105
+// GFX1250: v_rsq_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, vcc_lo
-// GFX1250: v_rsq_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x6a,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, vcc_lo
+// GFX1250: v_rsq_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x6a,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, vcc_hi
-// GFX1250: v_rsq_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x6b,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, vcc_hi
+// GFX1250: v_rsq_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x6b,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, ttmp15
-// GFX1250: v_rsq_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, ttmp15
+// GFX1250: v_rsq_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, m0
-// GFX1250: v_rsq_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfb,0xd5,0x7d,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, m0
+// GFX1250: v_rsq_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfb,0xd5,0x7d,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, exec_lo
-// GFX1250: v_rsq_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x7e,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, exec_lo
+// GFX1250: v_rsq_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x7e,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, exec_hi
-// GFX1250: v_rsq_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, exec_hi
+// GFX1250: v_rsq_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, null
-// GFX1250: v_rsq_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfb,0xd5,0x7c,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, null
+// GFX1250: v_rsq_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfb,0xd5,0x7c,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, -1
-// GFX1250: v_rsq_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
+v_rsq_bf16_e64 v5.l, -1
+// GFX1250: v_rsq_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_rsq_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
+v_rsq_bf16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_rsq_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-v_rsq_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_rsq_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
+v_rsq_bf16_e64 v5.l, src_scc mul:4
+// GFX1250: v_rsq_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-v_rsq_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_rsq_bf16 v5.h, v128.h
// GFX1250: v_rsq_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfb,0xd5,0x80,0x01,0x00,0x00]
-v_log_bf16_e64 v5, v1
-// GFX1250: v_log_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x01,0x00,0x00]
+v_log_bf16_e64 v5.l, v1.l
+// GFX1250: v_log_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x01,0x00,0x00]
-v_log_bf16_e64 v5, v255
-// GFX1250: v_log_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfc,0xd5,0xff,0x01,0x00,0x00]
+v_log_bf16_e64 v5.l, v255.l
+// GFX1250: v_log_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfc,0xd5,0xff,0x01,0x00,0x00]
-v_log_bf16_e64 v5, s1
-// GFX1250: v_log_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, s1
+// GFX1250: v_log_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x00,0x00,0x00]
-v_log_bf16_e64 v5, s105
-// GFX1250: v_log_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, s105
+// GFX1250: v_log_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00]
-v_log_bf16_e64 v5, vcc_lo
-// GFX1250: v_log_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x6a,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, vcc_lo
+// GFX1250: v_log_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x6a,0x00,0x00,0x00]
-v_log_bf16_e64 v5, vcc_hi
-// GFX1250: v_log_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x6b,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, vcc_hi
+// GFX1250: v_log_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x6b,0x00,0x00,0x00]
-v_log_bf16_e64 v5, ttmp15
-// GFX1250: v_log_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, ttmp15
+// GFX1250: v_log_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00]
-v_log_bf16_e64 v5, m0
-// GFX1250: v_log_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfc,0xd5,0x7d,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, m0
+// GFX1250: v_log_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfc,0xd5,0x7d,0x00,0x00,0x00]
-v_log_bf16_e64 v5, exec_lo
-// GFX1250: v_log_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x7e,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, exec_lo
+// GFX1250: v_log_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x7e,0x00,0x00,0x00]
-v_log_bf16_e64 v5, exec_hi
-// GFX1250: v_log_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, exec_hi
+// GFX1250: v_log_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00]
-v_log_bf16_e64 v5, null
-// GFX1250: v_log_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfc,0xd5,0x7c,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, null
+// GFX1250: v_log_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfc,0xd5,0x7c,0x00,0x00,0x00]
-v_log_bf16_e64 v5, -1
-// GFX1250: v_log_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
+v_log_bf16_e64 v5.l, -1
+// GFX1250: v_log_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
-v_log_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_log_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
+v_log_bf16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_log_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-v_log_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_log_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
+v_log_bf16_e64 v5.l, src_scc mul:4
+// GFX1250: v_log_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-v_log_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_log_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_log_bf16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_log_bf16 v5.h, v128.h
// GFX1250: v_log_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfc,0xd5,0x80,0x01,0x00,0x00]
-v_exp_bf16_e64 v5, v1
-// GFX1250: v_exp_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x01,0x00,0x00]
+v_exp_bf16_e64 v5.l, v1.l
+// GFX1250: v_exp_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x01,0x00,0x00]
-v_exp_bf16_e64 v5, v255
-// GFX1250: v_exp_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfd,0xd5,0xff,0x01,0x00,0x00]
+v_exp_bf16_e64 v5.l, v255.l
+// GFX1250: v_exp_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfd,0xd5,0xff,0x01,0x00,0x00]
-v_exp_bf16_e64 v5, s1
-// GFX1250: v_exp_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, s1
+// GFX1250: v_exp_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, s105
-// GFX1250: v_exp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, s105
+// GFX1250: v_exp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, vcc_lo
-// GFX1250: v_exp_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x6a,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, vcc_lo
+// GFX1250: v_exp_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x6a,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, vcc_hi
-// GFX1250: v_exp_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x6b,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, vcc_hi
+// GFX1250: v_exp_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x6b,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, ttmp15
-// GFX1250: v_exp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, ttmp15
+// GFX1250: v_exp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, m0
-// GFX1250: v_exp_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfd,0xd5,0x7d,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, m0
+// GFX1250: v_exp_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfd,0xd5,0x7d,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, exec_lo
-// GFX1250: v_exp_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x7e,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, exec_lo
+// GFX1250: v_exp_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x7e,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, exec_hi
-// GFX1250: v_exp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, exec_hi
+// GFX1250: v_exp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, null
-// GFX1250: v_exp_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfd,0xd5,0x7c,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, null
+// GFX1250: v_exp_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfd,0xd5,0x7c,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, -1
-// GFX1250: v_exp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
+v_exp_bf16_e64 v5.l, -1
+// GFX1250: v_exp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_exp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
+v_exp_bf16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_exp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-v_exp_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_exp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
+v_exp_bf16_e64 v5.l, src_scc mul:4
+// GFX1250: v_exp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-v_exp_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_exp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_exp_bf16 v5.h, v128.h
// GFX1250: v_exp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfd,0xd5,0x80,0x01,0x00,0x00]
-v_sin_bf16_e64 v5, v1
-// GFX1250: v_sin_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x01,0x00,0x00]
+v_sin_bf16_e64 v5.l, v1.l
+// GFX1250: v_sin_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x01,0x00,0x00]
-v_sin_bf16_e64 v5, v255
-// GFX1250: v_sin_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfe,0xd5,0xff,0x01,0x00,0x00]
+v_sin_bf16_e64 v5.l, v255.l
+// GFX1250: v_sin_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfe,0xd5,0xff,0x01,0x00,0x00]
-v_sin_bf16_e64 v5, s1
-// GFX1250: v_sin_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, s1
+// GFX1250: v_sin_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, s105
-// GFX1250: v_sin_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, s105
+// GFX1250: v_sin_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, vcc_lo
-// GFX1250: v_sin_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x6a,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, vcc_lo
+// GFX1250: v_sin_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x6a,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, vcc_hi
-// GFX1250: v_sin_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x6b,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, vcc_hi
+// GFX1250: v_sin_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x6b,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, ttmp15
-// GFX1250: v_sin_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, ttmp15
+// GFX1250: v_sin_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, m0
-// GFX1250: v_sin_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfe,0xd5,0x7d,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, m0
+// GFX1250: v_sin_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfe,0xd5,0x7d,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, exec_lo
-// GFX1250: v_sin_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x7e,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, exec_lo
+// GFX1250: v_sin_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x7e,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, exec_hi
-// GFX1250: v_sin_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, exec_hi
+// GFX1250: v_sin_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, null
-// GFX1250: v_sin_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfe,0xd5,0x7c,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, null
+// GFX1250: v_sin_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfe,0xd5,0x7c,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, -1
-// GFX1250: v_sin_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
+v_sin_bf16_e64 v5.l, -1
+// GFX1250: v_sin_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_sin_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
+v_sin_bf16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_sin_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-v_sin_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_sin_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
+v_sin_bf16_e64 v5.l, src_scc mul:4
+// GFX1250: v_sin_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-v_sin_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_sin_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_sin_bf16 v5.h, v128.h
// GFX1250: v_sin_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfe,0xd5,0x80,0x01,0x00,0x00]
-v_cos_bf16_e64 v5, v1
-// GFX1250: v_cos_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x01,0x00,0x00]
+v_cos_bf16_e64 v5.l, v1.l
+// GFX1250: v_cos_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x01,0x00,0x00]
-v_cos_bf16_e64 v5, v255
-// GFX1250: v_cos_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xff,0xd5,0xff,0x01,0x00,0x00]
+v_cos_bf16_e64 v5.l, v255.l
+// GFX1250: v_cos_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xff,0xd5,0xff,0x01,0x00,0x00]
-v_cos_bf16_e64 v5, s1
-// GFX1250: v_cos_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, s1
+// GFX1250: v_cos_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, s105
-// GFX1250: v_cos_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, s105
+// GFX1250: v_cos_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, vcc_lo
-// GFX1250: v_cos_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xff,0xd5,0x6a,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, vcc_lo
+// GFX1250: v_cos_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xff,0xd5,0x6a,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, vcc_hi
-// GFX1250: v_cos_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xff,0xd5,0x6b,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, vcc_hi
+// GFX1250: v_cos_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xff,0xd5,0x6b,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, ttmp15
-// GFX1250: v_cos_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, ttmp15
+// GFX1250: v_cos_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, m0
-// GFX1250: v_cos_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xff,0xd5,0x7d,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, m0
+// GFX1250: v_cos_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xff,0xd5,0x7d,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, exec_lo
-// GFX1250: v_cos_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xff,0xd5,0x7e,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, exec_lo
+// GFX1250: v_cos_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xff,0xd5,0x7e,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, exec_hi
-// GFX1250: v_cos_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, exec_hi
+// GFX1250: v_cos_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, null
-// GFX1250: v_cos_bf16_e64 v5, null ; encoding: [0x05,0x00,0xff,0xd5,0x7c,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, null
+// GFX1250: v_cos_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xff,0xd5,0x7c,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, -1
-// GFX1250: v_cos_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
+v_cos_bf16_e64 v5.l, -1
+// GFX1250: v_cos_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_cos_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
+v_cos_bf16_e64 v5.l, 0.5 mul:2
+// GFX1250: v_cos_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-v_cos_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_cos_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
+v_cos_bf16_e64 v5.l, src_scc mul:4
+// GFX1250: v_cos_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-v_cos_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_cos_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
+v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2
+// GFX1250: v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
v_cos_bf16_e64 v5.h, v128.h
// GFX1250: v_cos_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xff,0xd5,0x80,0x01,0x00,0x00]
-v_cvt_f32_bf16_e64 v5, v1
-// GFX1250: v_cvt_f32_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_f32_bf16_e64 v5, v1.l
+// GFX1250: v_cvt_f32_bf16_e64 v5, v1.l ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_f32_bf16_e64 v5, v255
-// GFX1250: v_cvt_f32_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xf2,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_f32_bf16_e64 v5, v255.l
+// GFX1250: v_cvt_f32_bf16_e64 v5, v255.l ; encoding: [0x05,0x00,0xf2,0xd5,0xff,0x01,0x00,0x00]
v_cvt_f32_bf16_e64 v5, s1
// GFX1250: v_cvt_f32_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x00,0x00,0x00]
@@ -4372,11 +4372,11 @@ v_cvt_f32_bf16_e64 v5, null
v_cvt_f32_bf16_e64 v5, -1
// GFX1250: v_cvt_f32_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf2,0xd5,0xc1,0x00,0x00,0x00]
-v_cvt_f32_bf16_e64 v5, v1 op_sel:[1]
-// GFX1250: v_cvt_f32_bf16_e64 v5, v1 op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x01,0x00,0x00]
+v_cvt_f32_bf16_e64 v5, v1.h op_sel:[1,0]
+// GFX1250: v_cvt_f32_bf16_e64 v5, v1.h op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x01,0x00,0x00]
-v_cvt_f32_bf16_e64 v5, v255 op_sel:[1]
-// GFX1250: v_cvt_f32_bf16_e64 v5, v255 op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0xff,0x01,0x00,0x00]
+v_cvt_f32_bf16_e64 v5, v255.h op_sel:[1,0]
+// GFX1250: v_cvt_f32_bf16_e64 v5, v255.h op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0xff,0x01,0x00,0x00]
v_cvt_f32_bf16_e64 v5, s1 op_sel:[1]
// GFX1250: v_cvt_f32_bf16_e64 v5, s1 op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x00,0x00,0x00]
@@ -4492,32 +4492,32 @@ v_cvt_pk_f16_fp8 v1, v150 op_sel:[1]
v_cvt_pk_f16_fp8 v1, s2 op_sel:[1]
// GFX1250: v_cvt_pk_f16_fp8 v1, s2 op_sel:[1,0] ; encoding: [0x01,0x08,0xf5,0xd5,0x02,0x00,0x00,0x00]
-v_sat_pk4_i4_i8 v150, v2
-// GFX1250: v_sat_pk4_i4_i8_e64 v150, v2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x01,0x00,0x00]
+v_sat_pk4_i4_i8 v150.l, v2
+// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, v2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x01,0x00,0x00]
-v_sat_pk4_i4_i8 v150, s2
-// GFX1250: v_sat_pk4_i4_i8_e64 v150, s2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x00,0x00,0x00]
+v_sat_pk4_i4_i8 v150.l, s2
+// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, s2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x00,0x00,0x00]
-v_sat_pk4_i4_i8 v150, 2
-// GFX1250: v_sat_pk4_i4_i8_e64 v150, 2 ; encoding: [0x96,0x00,0xf3,0xd5,0x82,0x00,0x00,0x00]
+v_sat_pk4_i4_i8 v150.l, 2
+// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, 2 ; encoding: [0x96,0x00,0xf3,0xd5,0x82,0x00,0x00,0x00]
-v_sat_pk4_i4_i8 v150, 0x1234
-// GFX1250: v_sat_pk4_i4_i8_e64 v150, 0x1234 ; encoding: [0x96,0x00,0xf3,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00]
+v_sat_pk4_i4_i8 v150.l, 0x1234
+// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, 0x1234 ; encoding: [0x96,0x00,0xf3,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00]
v_sat_pk4_i4_i8 v150.h, v2
// GFX1250: v_sat_pk4_i4_i8_e64 v150.h, v2 op_sel:[0,1] ; encoding: [0x96,0x40,0xf3,0xd5,0x02,0x01,0x00,0x00]
-v_sat_pk4_u4_u8 v150, v2
-// GFX1250: v_sat_pk4_u4_u8_e64 v150, v2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x01,0x00,0x00]
+v_sat_pk4_u4_u8 v150.l, v2
+// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, v2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x01,0x00,0x00]
-v_sat_pk4_u4_u8 v150, s2
-// GFX1250: v_sat_pk4_u4_u8_e64 v150, s2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x00,0x00,0x00]
+v_sat_pk4_u4_u8 v150.l, s2
+// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, s2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x00,0x00,0x00]
-v_sat_pk4_u4_u8 v150, 2
-// GFX1250: v_sat_pk4_u4_u8_e64 v150, 2 ; encoding: [0x96,0x00,0xf4,0xd5,0x82,0x00,0x00,0x00]
+v_sat_pk4_u4_u8 v150.l, 2
+// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, 2 ; encoding: [0x96,0x00,0xf4,0xd5,0x82,0x00,0x00,0x00]
-v_sat_pk4_u4_u8 v150, 0x1234
-// GFX1250: v_sat_pk4_u4_u8_e64 v150, 0x1234 ; encoding: [0x96,0x00,0xf4,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00]
+v_sat_pk4_u4_u8 v150.l, 0x1234
+// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, 0x1234 ; encoding: [0x96,0x00,0xf4,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00]
v_sat_pk4_u4_u8 v150.h, v2
// GFX1250: v_sat_pk4_u4_u8_e64 v150.h, v2 op_sel:[0,1] ; encoding: [0x96,0x40,0xf4,0xd5,0x02,0x01,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
index f14705f..d163856 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
@@ -58,120 +58,120 @@ v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask
// GFX1250: v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9e,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_tanh_f16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_tanh_f16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_tanh_f16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9f,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9f,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_tanh_f16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_tanh_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
@@ -222,468 +222,468 @@ v_prng_b32_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_prng_b32_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_rcp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sqrt_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_rsq_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_log_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_log_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_exp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sin_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_mirror
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_half_mirror
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_shl:1
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_shl:15
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_shr:1
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_shr:15
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_ror:1
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_ror:15
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
+v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
+v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
+v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cos_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xff,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_mirror
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_mirror
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_half_mirror
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_half_mirror
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:1
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:1
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:15
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:15
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:1
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:1
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:15
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:15
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:1
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:1
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:15
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:15
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
+v_cvt_f32_bf16_e64_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cvt_f32_bf16_e64_dpp v5, v128.h quad_perm:[3,2,1,0]
@@ -766,24 +766,24 @@ v_cvt_pk_f16_fp8 v1, v128.h quad_perm:[0,1,2,3]
// GFX1250: v_cvt_pk_f16_fp8_e64_dpp v1, v128.h op_sel:[1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x08,0xf5,0xd5,0xfa,0x00,0x00,0x00,0x80,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_i4_i8 v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
-// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+v_sat_pk4_i4_i8 v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
+// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_i4_i8 v150, v2 row_share:1 fi:1
-// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff]
+v_sat_pk4_i4_i8 v150.l, v2 row_share:1 fi:1
+// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sat_pk4_i4_i8 v150.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.h, v2 op_sel:[0,1] quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x40,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_u4_u8 v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
-// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+v_sat_pk4_u4_u8 v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
+// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_u4_u8 v150, v2 row_share:1 fi:1
-// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff]
+v_sat_pk4_u4_u8 v150.l, v2 row_share:1 fi:1
+// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sat_pk4_u4_u8 v150.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
index 0414421..6ec4d5f 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
@@ -18,40 +18,40 @@ v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9e,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_tanh_f16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_tanh_f16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_tanh_f16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9f,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9f,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_tanh_f16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_tanh_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
@@ -62,140 +62,140 @@ v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_rcp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sqrt_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_rsq_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_log_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_exp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sin_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
+v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
+v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
+v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cos_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
@@ -262,8 +262,8 @@ v_cvt_f16_fp8 v128.l, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_f16_fp8_e64_dpp v128.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x80,0x00,0xf7,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf2,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
+v_cvt_f32_bf16_e64_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf2,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_cvt_f32_bf16_e64_dpp v5, v128.h dpp8:[7,6,5,4,3,2,1,0]
@@ -298,24 +298,24 @@ v_cvt_pk_f16_fp8 v1, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_pk_f16_fp8_e64_dpp v1, v128.h op_sel:[1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x08,0xf5,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_i4_i8 v150, v2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+v_sat_pk4_i4_i8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_i4_i8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+v_sat_pk4_i4_i8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sat_pk4_i4_i8 v150.h, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x40,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_u4_u8 v150, v2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf4,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+v_sat_pk4_u4_u8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf4,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sat_pk4_u4_u8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+v_sat_pk4_u4_u8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sat_pk4_u4_u8 v150.h, v2 dpp8:[7,6,5,4,3,2,1,0]
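Editor's note on the hunks above (my gloss, not part of the commit): these tests move the GFX1250 true16 assembler checks to explicit half-register operands, where .l and .h name the low and high 16 bits of a 32-bit VGPR and the high halves are carried in op_sel. A minimal pair in the tests' own syntax, taken from instructions exercised above:

v_tanh_f16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]    // low halves, op_sel untouched
v_tanh_f16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]  // high halves, printed back with op_sel:[1,1]

The disassembler updates that follow split each check into GFX1250-REAL16 and GFX1250-FAKE16: real-true16 mode prints the .l/.h halves, while fake16 mode keeps plain 32-bit names and, for encodings that set the high-half bit, decodes to v129-style operands flagged as invalid for the Lo128 register classes.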
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt
index 07dbbdd..94edf22 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt
@@ -720,10 +720,12 @@
# GFX1250: v_cvt_f32_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xe4,0x0a,0x7e]
0x01,0xe5,0x0a,0x7e
-# GFX1250: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e]
+# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e]
+# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v1 ; encoding: [0x01,0xe5,0x0a,0x7e]
0x7f,0xe5,0x0a,0x7e
-# GFX1250: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e]
+# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e]
+# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v127 ; encoding: [0x7f,0xe5,0x0a,0x7e]
0x6b,0xe4,0x0a,0x7e
# GFX1250: v_cvt_f32_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xe4,0x0a,0x7e]
@@ -732,7 +734,8 @@
# GFX1250: v_cvt_f32_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xe4,0x0a,0x7e]
0x81,0xe5,0x0a,0x7e
-# GFX1250: v_cvt_f32_bf16_e32 v5, v1.h ; encoding: [0x81,0xe5,0x0a,0x7e]
+# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v1.h ; encoding: [0x81,0xe5,0x0a,0x7e]
+# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v129/*Invalid register, operand has 'VS_32_Lo128' register class*/ ; encoding: [0x81,0xe5,0x0a,0x7e]
0xff,0xf0,0x02,0x7e,0x34,0x12,0x00,0x00
# GFX1250-REAL16: v_cvt_f16_bf8_e32 v1.l, 0x1234 ; encoding: [0xff,0xf0,0x02,0x7e,0x34,0x12,0x00,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt
index c12ecb8..93286ca 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt
@@ -615,49 +615,64 @@
# GFX1250-REAL16: v_cos_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7f,0x81,0x1b,0x00,0xff]
0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30
-# GFX1250: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13]
0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v129/*Invalid register, operand has 'VGPR_32_Lo128' register class*/ quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff]
0xfa,0xf0,0x02,0x7e,0x02,0x39,0x00,0xff
# GFX1250-REAL16: v_cvt_f16_bf8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf0,0x02,0x7e,0x02,0x39,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt
index fa7b940..fb3f1b2 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt
@@ -165,16 +165,20 @@
# GFX1250-FAKE16: v_add_f64_e32 v[156:157], v[129:130], v[187:188] ; encoding: [0x81,0x77,0x39,0x05]
0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00
-# GFX1250: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00]
0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05]
0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05
-# GFX1250: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05]
+# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v129/*Invalid register, operand has 'VGPR_32_Lo128' register class*/ dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05]
0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05
# GFX1250-REAL16: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05]
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td
index ce4f010..18960b4 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td
@@ -96,71 +96,71 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK: const uint8_t *GenMyCombiner::getMatchTable() const {
// CHECK-NEXT: constexpr static uint8_t MatchTable0[] = {
-// CHECK-NEXT: /* 0 */ GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(99), GIMT_Encode2(210), /*)*//*default:*//*Label 5*/ GIMT_Encode4(520),
-// CHECK-NEXT: /* 10 */ /*TargetOpcode::G_STORE*//*Label 0*/ GIMT_Encode4(454), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /* 182 */ /*TargetOpcode::G_SEXT*//*Label 1*/ GIMT_Encode4(472), GIMT_Encode4(0),
-// CHECK-NEXT: /* 190 */ /*TargetOpcode::G_ZEXT*//*Label 2*/ GIMT_Encode4(484), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /* 414 */ /*TargetOpcode::G_FNEG*//*Label 3*/ GIMT_Encode4(496), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
-// CHECK-NEXT: /* 450 */ /*TargetOpcode::G_FABS*//*Label 4*/ GIMT_Encode4(508),
-// CHECK-NEXT: /* 454 */ // Label 0: @454
-// CHECK-NEXT: /* 454 */ GIM_Try, /*On fail goto*//*Label 6*/ GIMT_Encode4(471), // Rule ID 2 //
-// CHECK-NEXT: /* 459 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule2Enabled),
-// CHECK-NEXT: /* 462 */ // MIs[0] x
-// CHECK-NEXT: /* 462 */ // No operand predicates
-// CHECK-NEXT: /* 462 */ // MIs[0] y
-// CHECK-NEXT: /* 462 */ // No operand predicates
-// CHECK-NEXT: /* 462 */ GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner0),
-// CHECK-NEXT: /* 466 */ GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner1),
-// CHECK-NEXT: /* 470 */ // Combiner Rule #2: TwoMatchNoApply
-// CHECK-NEXT: /* 470 */ GIR_EraseRootFromParent_Done,
-// CHECK-NEXT: /* 471 */ // Label 6: @471
-// CHECK-NEXT: /* 471 */ GIM_Reject,
-// CHECK-NEXT: /* 472 */ // Label 1: @472
-// CHECK-NEXT: /* 472 */ GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(483), // Rule ID 3 //
-// CHECK-NEXT: /* 477 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule3Enabled),
-// CHECK-NEXT: /* 480 */ // MIs[0] a
-// CHECK-NEXT: /* 480 */ // No operand predicates
-// CHECK-NEXT: /* 480 */ // MIs[0] y
-// CHECK-NEXT: /* 480 */ // No operand predicates
-// CHECK-NEXT: /* 480 */ // Combiner Rule #3: NoMatchTwoApply
-// CHECK-NEXT: /* 480 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner2),
-// CHECK-NEXT: /* 483 */ // Label 7: @483
-// CHECK-NEXT: /* 483 */ GIM_Reject,
-// CHECK-NEXT: /* 484 */ // Label 2: @484
-// CHECK-NEXT: /* 484 */ GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(495), // Rule ID 4 //
-// CHECK-NEXT: /* 489 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule4Enabled),
-// CHECK-NEXT: /* 492 */ // MIs[0] a
-// CHECK-NEXT: /* 492 */ // No operand predicates
-// CHECK-NEXT: /* 492 */ // MIs[0] y
-// CHECK-NEXT: /* 492 */ // No operand predicates
-// CHECK-NEXT: /* 492 */ // Combiner Rule #4: CombineCXXOrder
-// CHECK-NEXT: /* 492 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner3),
-// CHECK-NEXT: /* 495 */ // Label 8: @495
-// CHECK-NEXT: /* 495 */ GIM_Reject,
-// CHECK-NEXT: /* 496 */ // Label 3: @496
-// CHECK-NEXT: /* 496 */ GIM_Try, /*On fail goto*//*Label 9*/ GIMT_Encode4(507), // Rule ID 1 //
-// CHECK-NEXT: /* 501 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
-// CHECK-NEXT: /* 504 */ // MIs[0] a
-// CHECK-NEXT: /* 504 */ // No operand predicates
-// CHECK-NEXT: /* 504 */ // MIs[0] b
-// CHECK-NEXT: /* 504 */ // No operand predicates
-// CHECK-NEXT: /* 504 */ // Combiner Rule #1: TwoMatchTwoApply
-// CHECK-NEXT: /* 504 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner1),
-// CHECK-NEXT: /* 507 */ // Label 9: @507
-// CHECK-NEXT: /* 507 */ GIM_Reject,
-// CHECK-NEXT: /* 508 */ // Label 4: @508
-// CHECK-NEXT: /* 508 */ GIM_Try, /*On fail goto*//*Label 10*/ GIMT_Encode4(519), // Rule ID 0 //
-// CHECK-NEXT: /* 513 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled),
-// CHECK-NEXT: /* 516 */ // MIs[0] a
-// CHECK-NEXT: /* 516 */ // No operand predicates
-// CHECK-NEXT: /* 516 */ // MIs[0] b
-// CHECK-NEXT: /* 516 */ // No operand predicates
-// CHECK-NEXT: /* 516 */ // Combiner Rule #0: OneMatchOneApply
-// CHECK-NEXT: /* 516 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner0),
-// CHECK-NEXT: /* 519 */ // Label 10: @519
-// CHECK-NEXT: /* 519 */ GIM_Reject,
-// CHECK-NEXT: /* 520 */ // Label 5: @520
-// CHECK-NEXT: /* 520 */ GIM_Reject,
-// CHECK-NEXT: /* 521 */ }; // Size: 521 bytes
+// CHECK-NEXT: /* 0 */ GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(99), GIMT_Encode2(211), /*)*//*default:*//*Label 5*/ GIMT_Encode4(524),
+// CHECK-NEXT: /* 10 */ /*TargetOpcode::G_STORE*//*Label 0*/ GIMT_Encode4(458), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /* 182 */ /*TargetOpcode::G_SEXT*//*Label 1*/ GIMT_Encode4(476), GIMT_Encode4(0),
+// CHECK-NEXT: /* 190 */ /*TargetOpcode::G_ZEXT*//*Label 2*/ GIMT_Encode4(488), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /* 418 */ /*TargetOpcode::G_FNEG*//*Label 3*/ GIMT_Encode4(500), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0),
+// CHECK-NEXT: /* 454 */ /*TargetOpcode::G_FABS*//*Label 4*/ GIMT_Encode4(512),
+// CHECK-NEXT: /* 458 */ // Label 0: @458
+// CHECK-NEXT: /* 458 */ GIM_Try, /*On fail goto*//*Label 6*/ GIMT_Encode4(475), // Rule ID 2 //
+// CHECK-NEXT: /* 463 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule2Enabled),
+// CHECK-NEXT: /* 466 */ // MIs[0] x
+// CHECK-NEXT: /* 466 */ // No operand predicates
+// CHECK-NEXT: /* 466 */ // MIs[0] y
+// CHECK-NEXT: /* 466 */ // No operand predicates
+// CHECK-NEXT: /* 466 */ GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner0),
+// CHECK-NEXT: /* 470 */ GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner1),
+// CHECK-NEXT: /* 474 */ // Combiner Rule #2: TwoMatchNoApply
+// CHECK-NEXT: /* 474 */ GIR_EraseRootFromParent_Done,
+// CHECK-NEXT: /* 475 */ // Label 6: @475
+// CHECK-NEXT: /* 475 */ GIM_Reject,
+// CHECK-NEXT: /* 476 */ // Label 1: @476
+// CHECK-NEXT: /* 476 */ GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(487), // Rule ID 3 //
+// CHECK-NEXT: /* 481 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule3Enabled),
+// CHECK-NEXT: /* 484 */ // MIs[0] a
+// CHECK-NEXT: /* 484 */ // No operand predicates
+// CHECK-NEXT: /* 484 */ // MIs[0] y
+// CHECK-NEXT: /* 484 */ // No operand predicates
+// CHECK-NEXT: /* 484 */ // Combiner Rule #3: NoMatchTwoApply
+// CHECK-NEXT: /* 484 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner2),
+// CHECK-NEXT: /* 487 */ // Label 7: @487
+// CHECK-NEXT: /* 487 */ GIM_Reject,
+// CHECK-NEXT: /* 488 */ // Label 2: @488
+// CHECK-NEXT: /* 488 */ GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(499), // Rule ID 4 //
+// CHECK-NEXT: /* 493 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule4Enabled),
+// CHECK-NEXT: /* 496 */ // MIs[0] a
+// CHECK-NEXT: /* 496 */ // No operand predicates
+// CHECK-NEXT: /* 496 */ // MIs[0] y
+// CHECK-NEXT: /* 496 */ // No operand predicates
+// CHECK-NEXT: /* 496 */ // Combiner Rule #4: CombineCXXOrder
+// CHECK-NEXT: /* 496 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner3),
+// CHECK-NEXT: /* 499 */ // Label 8: @499
+// CHECK-NEXT: /* 499 */ GIM_Reject,
+// CHECK-NEXT: /* 500 */ // Label 3: @500
+// CHECK-NEXT: /* 500 */ GIM_Try, /*On fail goto*//*Label 9*/ GIMT_Encode4(511), // Rule ID 1 //
+// CHECK-NEXT: /* 505 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled),
+// CHECK-NEXT: /* 508 */ // MIs[0] a
+// CHECK-NEXT: /* 508 */ // No operand predicates
+// CHECK-NEXT: /* 508 */ // MIs[0] b
+// CHECK-NEXT: /* 508 */ // No operand predicates
+// CHECK-NEXT: /* 508 */ // Combiner Rule #1: TwoMatchTwoApply
+// CHECK-NEXT: /* 508 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner1),
+// CHECK-NEXT: /* 511 */ // Label 9: @511
+// CHECK-NEXT: /* 511 */ GIM_Reject,
+// CHECK-NEXT: /* 512 */ // Label 4: @512
+// CHECK-NEXT: /* 512 */ GIM_Try, /*On fail goto*//*Label 10*/ GIMT_Encode4(523), // Rule ID 0 //
+// CHECK-NEXT: /* 517 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled),
+// CHECK-NEXT: /* 520 */ // MIs[0] a
+// CHECK-NEXT: /* 520 */ // No operand predicates
+// CHECK-NEXT: /* 520 */ // MIs[0] b
+// CHECK-NEXT: /* 520 */ // No operand predicates
+// CHECK-NEXT: /* 520 */ // Combiner Rule #0: OneMatchOneApply
+// CHECK-NEXT: /* 520 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner0),
+// CHECK-NEXT: /* 523 */ // Label 10: @523
+// CHECK-NEXT: /* 523 */ GIM_Reject,
+// CHECK-NEXT: /* 524 */ // Label 5: @524
+// CHECK-NEXT: /* 524 */ GIM_Reject,
+// CHECK-NEXT: /* 525 */ }; // Size: 525 bytes
// CHECK-NEXT: return MatchTable0;
// CHECK-NEXT: }
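Reviewer note on the offset churn above (inferred from the hunk, not commit text): the opcode switch now spans one more opcode, so its upper bound moves from 210 to 211 and the jump table gains one GIMT_Encode4 entry. Each entry is 4 bytes, which shifts every absolute offset after the table, and the total size, by the same amount:

  table size: 521 + 4 = 525 bytes
  labels:     454 -> 458, 472 -> 476, 484 -> 488, 496 -> 500, 508 -> 512, 520 -> 524

The 1898 -> 1902 size check in GlobalISelEmitter.td below looks like the same four-byte growth surfacing in a larger table.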
diff --git a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
index 6be1720..fdabc53 100644
--- a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
+++ b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
@@ -535,7 +535,7 @@ def : Pat<(frag GPR32:$src1, complex:$src2, complex:$src3),
// R00O-NEXT: GIM_Reject,
// R00O: // Label [[DEFAULT_NUM]]: @[[DEFAULT]]
// R00O-NEXT: GIM_Reject,
-// R00O-NEXT: }; // Size: 1898 bytes
+// R00O-NEXT: }; // Size: 1902 bytes
def INSNBOB : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3, GPR32:$src4),
[(set GPR32:$dst,
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/pr161367.ll b/llvm/test/Transforms/CorrelatedValuePropagation/pr161367.ll
new file mode 100644
index 0000000..346eaea
--- /dev/null
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/pr161367.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s
+
+; Make sure that we apply trunc to the edge value of %x.
+@g = external global i8
+
+define i16 @pr161367(i64 %x) {
+; CHECK-LABEL: define i16 @pr161367(
+; CHECK-SAME: i64 [[X:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[X]], sub (i64 0, i64 ptrtoint (ptr @g to i64))
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[ELSE:.*]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RET:%.*]] = phi i16 [ trunc (i64 sub (i64 0, i64 ptrtoint (ptr @g to i64)) to i16), %[[ENTRY]] ], [ 0, %[[ELSE]] ]
+; CHECK-NEXT: ret i16 [[RET]]
+;
+entry:
+ %trunc = trunc i64 %x to i16
+ %exitcond = icmp eq i64 %x, sub (i64 0, i64 ptrtoint (ptr @g to i64))
+ br i1 %exitcond, label %exit, label %else
+
+else:
+ br label %exit
+
+exit:
+ %ret = phi i16 [ %trunc, %entry ], [ 0, %else ]
+ ret i16 %ret
+}
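Restating what the new test pins down (my sketch, derived from the CHECK lines above): on the %entry -> %exit edge the branch condition pins %x to the constant expression sub (i64 0, i64 ptrtoint (ptr @g to i64)), so correlated-propagation may replace the phi input %trunc with that edge value, provided it also applies the trunc to i16:

; before the pass
;   %ret = phi i16 [ %trunc, %entry ], [ 0, %else ]
; after the pass
;   %ret = phi i16 [ trunc (i64 sub (i64 0, i64 ptrtoint (ptr @g to i64)) to i16), %entry ], [ 0, %else ]

Skipping that trunc, which is what the "apply trunc to the edge value" comment guards against, would feed an i64-wide constant into an i16 phi.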
diff --git a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll
index cba1ba8..ad05684 100644
--- a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll
+++ b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll
@@ -304,32 +304,43 @@ end:
define void @pr106083_invalidBBarg_fold(i1 %cmp1, i1 %cmp2, i1 %not, ptr %d) {
; CHECK-LABEL: @pr106083_invalidBBarg_fold(
; CHECK-NEXT: bb:
-; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[SEL_SI_UNFOLD_FALSE:%.*]]
-; CHECK: sel.si.unfold.false:
-; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 1, [[BB:%.*]] ]
-; CHECK-NEXT: br label [[BB1]]
+; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: BB1:
-; CHECK-NEXT: [[I:%.*]] = phi i16 [ 0, [[BB1_BACKEDGE:%.*]] ], [ 0, [[BB]] ], [ 1, [[BB7:%.*]] ], [ 0, [[SEL_SI_UNFOLD_FALSE]] ], [ 1, [[BB7_JT0:%.*]] ]
-; CHECK-NEXT: [[SEL_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[SEL_SI_UNFOLD_PHI]], [[BB1_BACKEDGE]] ], [ [[SEL_SI_UNFOLD_PHI]], [[BB7]] ], [ 0, [[BB]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SEL_SI_UNFOLD_FALSE]] ], [ [[SEL_SI_UNFOLD_PHI]], [[BB7_JT0]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i16 [ 0, [[BB1_BACKEDGE:%.*]] ], [ 0, [[BB:%.*]] ], [ 1, [[BB9:%.*]] ], [ 1, [[BB7_JT0:%.*]] ]
; CHECK-NEXT: br i1 [[NOT:%.*]], label [[BB7_JT0]], label [[BB2:%.*]]
; CHECK: BB2:
; CHECK-NEXT: store i16 0, ptr [[D:%.*]], align 2
-; CHECK-NEXT: br i1 [[CMP2:%.*]], label [[BB7]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0:%.*]]
+; CHECK-NEXT: br i1 [[CMP2:%.*]], label [[BB7:%.*]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0:%.*]]
; CHECK: spec.select.si.unfold.false:
-; CHECK-NEXT: br label [[BB7]]
+; CHECK-NEXT: br label [[BB9]]
; CHECK: spec.select.si.unfold.false.jt0:
; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[BB2]] ]
; CHECK-NEXT: br label [[BB7_JT0]]
+; CHECK: sel.si.unfold.true:
+; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB9]], label [[SEL_SI_UNFOLD_FALSE_JT1:%.*]]
+; CHECK: sel.si.unfold.true.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[BB2]] ]
+; CHECK-NEXT: br i1 [[CMP1]], label [[BB7_JT0]], label [[SEL_SI_UNFOLD_FALSE:%.*]]
+; CHECK: sel.si.unfold.false:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 1, [[BB7]] ]
+; CHECK-NEXT: br label [[BB9]]
+; CHECK: sel.si.unfold.false.jt1:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT1:%.*]] = phi i32 [ 1, [[SEL_SI_UNFOLD_TRUE:%.*]] ]
+; CHECK-NEXT: br label [[BB7_JT1:%.*]]
; CHECK: BB7:
-; CHECK-NEXT: [[D_PROMOTED4:%.*]] = phi i16 [ 1, [[BB2]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] ]
-; CHECK-NEXT: [[_3:%.*]] = phi i32 [ [[SEL_SI_UNFOLD_PHI]], [[BB2]] ], [ poison, [[SPEC_SELECT_SI_UNFOLD_FALSE]] ]
+; CHECK-NEXT: [[D_PROMOTED4:%.*]] = phi i16 [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] ], [ 1, [[SEL_SI_UNFOLD_TRUE]] ], [ 1, [[SEL_SI_UNFOLD_FALSE]] ]
+; CHECK-NEXT: [[_3:%.*]] = phi i32 [ poison, [[SPEC_SELECT_SI_UNFOLD_FALSE]] ], [ poison, [[SEL_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2]], [[SEL_SI_UNFOLD_FALSE]] ]
; CHECK-NEXT: switch i32 [[_3]], label [[BB1_BACKEDGE]] [
; CHECK-NEXT: i32 0, label [[BB1]]
; CHECK-NEXT: i32 1, label [[BB8:%.*]]
; CHECK-NEXT: ]
+; CHECK: BB7.jt1:
+; CHECK-NEXT: [[D_PROMOTED4_JT1:%.*]] = phi i16 [ 1, [[SEL_SI_UNFOLD_FALSE_JT1]] ]
+; CHECK-NEXT: [[_3_JT1:%.*]] = phi i32 [ [[DOTSI_UNFOLD_PHI2_JT1]], [[SEL_SI_UNFOLD_FALSE_JT1]] ]
+; CHECK-NEXT: br label [[BB8]]
; CHECK: BB7.jt0:
-; CHECK-NEXT: [[D_PROMOTED4_JT0:%.*]] = phi i16 [ 0, [[BB1]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ]
-; CHECK-NEXT: [[_3_JT0:%.*]] = phi i32 [ 0, [[BB1]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ]
+; CHECK-NEXT: [[D_PROMOTED4_JT0:%.*]] = phi i16 [ 0, [[BB1]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ], [ 1, [[BB7]] ]
+; CHECK-NEXT: [[_3_JT0:%.*]] = phi i32 [ 0, [[BB1]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ], [ [[DOTSI_UNFOLD_PHI1]], [[BB7]] ]
; CHECK-NEXT: br label [[BB1]]
; CHECK: BB1.backedge:
; CHECK-NEXT: br label [[BB1]]
@@ -367,30 +378,40 @@ BB8: ; preds = %BB7
define void @pr106083_select_dead_uses(i1 %cmp1, i1 %not, ptr %p) {
; CHECK-LABEL: @pr106083_select_dead_uses(
; CHECK-NEXT: bb:
-; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[DOTLOOPEXIT6:%.*]], label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]]
-; CHECK: spec.select.si.unfold.false:
-; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 1, [[BB:%.*]] ]
-; CHECK-NEXT: br label [[DOTLOOPEXIT6]]
+; CHECK-NEXT: br label [[DOTLOOPEXIT6:%.*]]
; CHECK: .loopexit6:
-; CHECK-NEXT: [[SPEC_SELECT_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[SPEC_SELECT_SI_UNFOLD_PHI]], [[SELECT_UNFOLD:%.*]] ], [ 0, [[BB]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ]
; CHECK-NEXT: br i1 [[NOT:%.*]], label [[SELECT_UNFOLD_JT0:%.*]], label [[BB1:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: [[NOT2:%.*]] = icmp eq i32 0, 0
-; CHECK-NEXT: br i1 [[NOT2]], label [[SELECT_UNFOLD]], label [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0:%.*]]
+; CHECK-NEXT: br i1 [[NOT2]], label [[SELECT_UNFOLD:%.*]], label [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0:%.*]]
; CHECK: spec.select7.si.unfold.false:
-; CHECK-NEXT: br label [[SELECT_UNFOLD]]
+; CHECK-NEXT: br label [[SELECT_UNFOLD1:%.*]]
; CHECK: spec.select7.si.unfold.false.jt0:
; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[BB1]] ]
; CHECK-NEXT: br label [[SELECT_UNFOLD_JT0]]
+; CHECK: spec.select.si.unfold.true:
+; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[SELECT_UNFOLD1]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT1:%.*]]
+; CHECK: spec.select.si.unfold.true.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[BB1]] ]
+; CHECK-NEXT: br i1 [[CMP1]], label [[SELECT_UNFOLD_JT0]], label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]]
+; CHECK: spec.select.si.unfold.false:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 1, [[SELECT_UNFOLD]] ]
+; CHECK-NEXT: br label [[SELECT_UNFOLD1]]
+; CHECK: spec.select.si.unfold.false.jt1:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT1:%.*]] = phi i32 [ 1, [[SPEC_SELECT_SI_UNFOLD_TRUE:%.*]] ]
+; CHECK-NEXT: br label [[SELECT_UNFOLD_JT1:%.*]]
; CHECK: select.unfold:
-; CHECK-NEXT: [[_2:%.*]] = phi i32 [ [[SPEC_SELECT_SI_UNFOLD_PHI]], [[BB1]] ], [ poison, [[SPEC_SELECT7_SI_UNFOLD_FALSE:%.*]] ]
+; CHECK-NEXT: [[_2:%.*]] = phi i32 [ poison, [[SPEC_SELECT7_SI_UNFOLD_FALSE:%.*]] ], [ poison, [[SPEC_SELECT_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ]
; CHECK-NEXT: switch i32 [[_2]], label [[BB2:%.*]] [
; CHECK-NEXT: i32 0, label [[DOTPREHEADER_PREHEADER:%.*]]
; CHECK-NEXT: i32 1, label [[DOTLOOPEXIT6]]
; CHECK-NEXT: ]
+; CHECK: select.unfold.jt1:
+; CHECK-NEXT: [[_2_JT1:%.*]] = phi i32 [ [[DOTSI_UNFOLD_PHI2_JT1]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT1]] ]
+; CHECK-NEXT: br label [[DOTLOOPEXIT6]]
; CHECK: select.unfold.jt0:
-; CHECK-NEXT: [[_2_JT0:%.*]] = phi i32 [ 0, [[DOTLOOPEXIT6]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0]] ]
+; CHECK-NEXT: [[_2_JT0:%.*]] = phi i32 [ 0, [[DOTLOOPEXIT6]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SELECT_UNFOLD]] ]
; CHECK-NEXT: br label [[DOTPREHEADER_PREHEADER]]
; CHECK: .preheader.preheader:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll
index 93872c3..663f459 100644
--- a/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll
+++ b/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll
@@ -463,3 +463,87 @@ unreachable:
sw.bb: ; preds = %if.end
br label %while.cond
}
+
+define i16 @pr160250() {
+; CHECK-LABEL: @pr160250(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_COND48:%.*]]
+; CHECK: for.cond48:
+; CHECK-NEXT: br i1 false, label [[CLEANUP87_JT0:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br i1 false, label [[DOT6_SI_UNFOLD_TRUE:%.*]], label [[DOT5_SI_UNFOLD_TRUE:%.*]]
+; CHECK: .5.si.unfold.true:
+; CHECK-NEXT: br i1 false, label [[SPEC_SELECT1_SI_UNFOLD_TRUE1:%.*]], label [[DOT5_SI_UNFOLD_FALSE_JT0:%.*]]
+; CHECK: .5.si.unfold.true.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[IF_ELSE]] ]
+; CHECK-NEXT: br i1 false, label [[SPEC_SELECT1_SI_UNFOLD_TRUE:%.*]], label [[DOT5_SI_UNFOLD_FALSE:%.*]]
+; CHECK: .5.si.unfold.false:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 0, [[DOT5_SI_UNFOLD_TRUE]] ]
+; CHECK-NEXT: br label [[SPEC_SELECT1_SI_UNFOLD_TRUE1]]
+; CHECK: .5.si.unfold.false.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT0:%.*]] = phi i32 [ 0, [[DOT5_SI_UNFOLD_TRUE1:%.*]] ]
+; CHECK-NEXT: br label [[SPEC_SELECT1_SI_UNFOLD_TRUE]]
+; CHECK: spec.select1.si.unfold.true:
+; CHECK-NEXT: [[DOT5_SI_UNFOLD_PHI:%.*]] = phi i32 [ poison, [[DOT5_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI2]], [[DOT5_SI_UNFOLD_FALSE]] ]
+; CHECK-NEXT: br i1 false, label [[SPEC_SELECT_SI_UNFOLD_FALSE1:%.*]], label [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT1:%.*]]
+; CHECK: spec.select1.si.unfold.true.jt0:
+; CHECK-NEXT: [[DOT5_SI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ [[DOTSI_UNFOLD_PHI1]], [[DOT5_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2_JT0]], [[DOT5_SI_UNFOLD_FALSE_JT0]] ]
+; CHECK-NEXT: br i1 false, label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]], label [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT0:%.*]]
+; CHECK: spec.select1.si.unfold.false:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI:%.*]] = phi i32 [ 0, [[SPEC_SELECT1_SI_UNFOLD_TRUE]] ]
+; CHECK-NEXT: br label [[SPEC_SELECT_SI_UNFOLD_FALSE1]]
+; CHECK: spec.select1.si.unfold.false.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[SPEC_SELECT1_SI_UNFOLD_TRUE1]] ]
+; CHECK-NEXT: br label [[SPEC_SELECT_SI_UNFOLD_FALSE]]
+; CHECK: spec.select.si.unfold.false:
+; CHECK-NEXT: [[SPEC_SELECT1_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[DOT5_SI_UNFOLD_PHI]], [[SPEC_SELECT1_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI]], [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT0]] ]
+; CHECK-NEXT: br label [[CLEANUP87:%.*]]
+; CHECK: spec.select.si.unfold.false.jt0:
+; CHECK-NEXT: [[SPEC_SELECT1_SI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ [[DOT5_SI_UNFOLD_PHI_JT0]], [[SPEC_SELECT1_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT1]] ]
+; CHECK-NEXT: br label [[CLEANUP87_JT0]]
+; CHECK: .6.si.unfold.true:
+; CHECK-NEXT: br i1 false, label [[CLEANUP87]], label [[DOT6_SI_UNFOLD_FALSE_JT0:%.*]]
+; CHECK: .6.si.unfold.true.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI3:%.*]] = phi i32 [ 0, [[IF_ELSE]] ]
+; CHECK-NEXT: br i1 false, label [[CLEANUP87_JT0]], label [[DOT6_SI_UNFOLD_FALSE:%.*]]
+; CHECK: .6.si.unfold.false:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI4:%.*]] = phi i32 [ 0, [[DOT6_SI_UNFOLD_TRUE]] ]
+; CHECK-NEXT: br label [[CLEANUP87]]
+; CHECK: .6.si.unfold.false.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI4_JT0:%.*]] = phi i32 [ 0, [[DOT6_SI_UNFOLD_TRUE1:%.*]] ]
+; CHECK-NEXT: br label [[CLEANUP87_JT0]]
+; CHECK: cleanup87:
+; CHECK-NEXT: [[CLEANUP_DEST_SLOT_3:%.*]] = phi i32 [ [[SPEC_SELECT1_SI_UNFOLD_PHI]], [[SPEC_SELECT_SI_UNFOLD_FALSE1]] ], [ poison, [[DOT6_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI4]], [[DOT6_SI_UNFOLD_FALSE]] ]
+; CHECK-NEXT: switch i32 [[CLEANUP_DEST_SLOT_3]], label [[FOR_COND48_BACKEDGE:%.*]] [
+; CHECK-NEXT: i32 0, label [[FOR_COND48_BACKEDGE]]
+; CHECK-NEXT: i32 1, label [[FOR_COND48_BACKEDGE]]
+; CHECK-NEXT: ]
+; CHECK: cleanup87.jt0:
+; CHECK-NEXT: [[CLEANUP_DEST_SLOT_3_JT0:%.*]] = phi i32 [ 0, [[FOR_COND48]] ], [ [[SPEC_SELECT1_SI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ], [ [[DOTSI_UNFOLD_PHI3]], [[DOT6_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI4_JT0]], [[DOT6_SI_UNFOLD_FALSE_JT0]] ]
+; CHECK-NEXT: br label [[FOR_COND48_BACKEDGE]]
+; CHECK: for.cond48.backedge:
+; CHECK-NEXT: br label [[FOR_COND48]]
+;
+entry:
+ %.5 = select i1 false, i32 0, i32 0
+ %.6 = select i1 false, i32 0, i32 0
+ br label %for.cond48
+
+for.cond48: ; preds = %for.cond48.backedge, %entry
+ br i1 false, label %cleanup87, label %if.else
+
+if.else: ; preds = %for.cond48
+ %spec.select1 = select i1 false, i32 %.5, i32 0
+ %spec.select = select i1 false, i32 %.6, i32 %spec.select1
+ br label %cleanup87
+
+cleanup87: ; preds = %if.else, %for.cond48
+ %cleanup.dest.slot.3 = phi i32 [ 0, %for.cond48 ], [ %spec.select, %if.else ]
+ switch i32 %cleanup.dest.slot.3, label %for.cond48.backedge [
+ i32 0, label %for.cond48.backedge
+ i32 1, label %for.cond48.backedge
+ ]
+
+for.cond48.backedge: ; preds = %cleanup87, %cleanup87, %cleanup87
+ br label %for.cond48
+}
diff --git a/llvm/test/Transforms/FunctionAttrs/nocapture.ll b/llvm/test/Transforms/FunctionAttrs/nocapture.ll
index 60a4214..8113ba65 100644
--- a/llvm/test/Transforms/FunctionAttrs/nocapture.ll
+++ b/llvm/test/Transforms/FunctionAttrs/nocapture.ll
@@ -1398,5 +1398,73 @@ define void @assume_nonnull(ptr %p) {
ret void
}
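+
+; The !captures store metadata below bounds how the stored pointer may
+; escape; these tests check that FNATTRS turns each variant into the
+; matching captures(...) attribute on %x (a summary of the expected
+; checks, not of documented semantics).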
+define void @captures_metadata_address_is_null(ptr %x, ptr %y) {
+; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; FNATTRS-LABEL: define void @captures_metadata_address_is_null
+; FNATTRS-SAME: (ptr captures(address_is_null) [[X:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[Y:%.*]]) #[[ATTR17]] {
+; FNATTRS-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META0:![0-9]+]]
+; FNATTRS-NEXT: ret void
+;
+; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; ATTRIBUTOR-LABEL: define void @captures_metadata_address_is_null
+; ATTRIBUTOR-SAME: (ptr nofree writeonly [[X:%.*]], ptr nofree nonnull writeonly captures(none) [[Y:%.*]]) #[[ATTR13]] {
+; ATTRIBUTOR-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META0:![0-9]+]]
+; ATTRIBUTOR-NEXT: ret void
+;
+ store ptr %x, ptr %y, !captures !{!"address_is_null"}
+ ret void
+}
+
+define void @captures_metadata_address(ptr %x, ptr %y) {
+; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; FNATTRS-LABEL: define void @captures_metadata_address
+; FNATTRS-SAME: (ptr captures(address) [[X:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[Y:%.*]]) #[[ATTR17]] {
+; FNATTRS-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META1:![0-9]+]]
+; FNATTRS-NEXT: ret void
+;
+; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; ATTRIBUTOR-LABEL: define void @captures_metadata_address
+; ATTRIBUTOR-SAME: (ptr nofree writeonly [[X:%.*]], ptr nofree nonnull writeonly captures(none) [[Y:%.*]]) #[[ATTR13]] {
+; ATTRIBUTOR-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META1:![0-9]+]]
+; ATTRIBUTOR-NEXT: ret void
+;
+ store ptr %x, ptr %y, !captures !{!"address"}
+ ret void
+}
+
+define void @captures_metadata_address_read_provenance(ptr %x, ptr %y) {
+; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; FNATTRS-LABEL: define void @captures_metadata_address_read_provenance
+; FNATTRS-SAME: (ptr captures(address, read_provenance) [[X:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[Y:%.*]]) #[[ATTR17]] {
+; FNATTRS-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META2:![0-9]+]]
+; FNATTRS-NEXT: ret void
+;
+; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; ATTRIBUTOR-LABEL: define void @captures_metadata_address_read_provenance
+; ATTRIBUTOR-SAME: (ptr nofree writeonly [[X:%.*]], ptr nofree nonnull writeonly captures(none) [[Y:%.*]]) #[[ATTR13]] {
+; ATTRIBUTOR-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META2:![0-9]+]]
+; ATTRIBUTOR-NEXT: ret void
+;
+ store ptr %x, ptr %y, !captures !{!"address", !"read_provenance"}
+ ret void
+}
+
+define void @captures_metadata_provenance(ptr %x, ptr %y) {
+; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; FNATTRS-LABEL: define void @captures_metadata_provenance
+; FNATTRS-SAME: (ptr captures(provenance) [[X:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[Y:%.*]]) #[[ATTR17]] {
+; FNATTRS-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META3:![0-9]+]]
+; FNATTRS-NEXT: ret void
+;
+; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; ATTRIBUTOR-LABEL: define void @captures_metadata_provenance
+; ATTRIBUTOR-SAME: (ptr nofree writeonly [[X:%.*]], ptr nofree nonnull writeonly captures(none) [[Y:%.*]]) #[[ATTR13]] {
+; ATTRIBUTOR-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META3:![0-9]+]]
+; ATTRIBUTOR-NEXT: ret void
+;
+ store ptr %x, ptr %y, !captures !{!"provenance"}
+ ret void
+}
+
declare ptr @llvm.launder.invariant.group.p0(ptr)
declare ptr @llvm.strip.invariant.group.p0(ptr)
diff --git a/llvm/test/Transforms/GVN/condprop.ll b/llvm/test/Transforms/GVN/condprop.ll
index 15ffcbf..eb2a9f1 100644
--- a/llvm/test/Transforms/GVN/condprop.ll
+++ b/llvm/test/Transforms/GVN/condprop.ll
@@ -321,6 +321,66 @@ different:
ret i1 %cmp3
}
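+
+; On the bb1 -> bb2 edge %cmp is true, i.e. x == y, so %cmp.not must be
+; false there. The first test below expects GVN to fold that phi
+; incoming to false; the second documents the current result when
+; %cmp.not is defined inside bb1 itself.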
+define i1 @test6_phi1(i1 %c, i32 %x, i32 %y) {
+; CHECK-LABEL: @test6_phi1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ne i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[CMP]], label [[BB2]], label [[BB3:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ false, [[BB1]] ], [ true, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i1 [[PHI]]
+; CHECK: bb3:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp.not = icmp ne i32 %x, %y
+ br i1 %c, label %bb1, label %bb2
+
+bb1:
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %bb2, label %bb3
+
+bb2:
+ %phi = phi i1 [ %cmp.not, %bb1 ], [ true, %entry ]
+ ret i1 %phi
+
+bb3:
+ ret i1 false
+}
+
+define i1 @test6_phi2(i1 %c, i32 %x, i32 %y) {
+; CHECK-LABEL: @test6_phi2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ne i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[CMP]], label [[BB2]], label [[BB3:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ [[CMP_NOT]], [[BB1]] ], [ true, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i1 [[PHI]]
+; CHECK: bb3:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ br i1 %c, label %bb1, label %bb2
+
+bb1:
+ %cmp.not = icmp ne i32 %x, %y
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %bb2, label %bb3
+
+bb2:
+ %phi = phi i1 [ %cmp.not, %bb1 ], [ true, %entry ]
+ ret i1 %phi
+
+bb3:
+ ret i1 false
+}
+
define i1 @test7(i32 %x, i32 %y) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
diff --git a/llvm/test/Transforms/GlobalOpt/fastcc.ll b/llvm/test/Transforms/GlobalOpt/fastcc.ll
index 854357e..edbd602 100644
--- a/llvm/test/Transforms/GlobalOpt/fastcc.ll
+++ b/llvm/test/Transforms/GlobalOpt/fastcc.ll
@@ -1,16 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt < %s -passes=globalopt -S | FileCheck %s
declare token @llvm.call.preallocated.setup(i32)
declare ptr @llvm.call.preallocated.arg(token, i32)
define internal i32 @f(ptr %m) {
-; CHECK-LABEL: define internal fastcc i32 @f
+; CHECK-LABEL: define internal fastcc i32 @f(
+; CHECK-SAME: ptr [[M:%.*]]) unnamed_addr {
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[M]], align 4
+; CHECK-NEXT: ret i32 [[V]]
+;
%v = load i32, ptr %m
ret i32 %v
}
define internal x86_thiscallcc i32 @g(ptr %m) {
-; CHECK-LABEL: define internal fastcc i32 @g
+; CHECK-LABEL: define internal fastcc i32 @g(
+; CHECK-SAME: ptr [[M:%.*]]) unnamed_addr {
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[M]], align 4
+; CHECK-NEXT: ret i32 [[V]]
+;
%v = load i32, ptr %m
ret i32 %v
}
@@ -18,41 +27,80 @@ define internal x86_thiscallcc i32 @g(ptr %m) {
; Leave this one alone, because the user went out of their way to request this
; convention.
define internal coldcc i32 @h(ptr %m) {
-; CHECK-LABEL: define internal coldcc i32 @h
+; CHECK-LABEL: define internal coldcc i32 @h(
+; CHECK-SAME: ptr [[M:%.*]]) unnamed_addr {
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[M]], align 4
+; CHECK-NEXT: ret i32 [[V]]
+;
%v = load i32, ptr %m
ret i32 %v
}
define internal i32 @j(ptr %m) {
-; CHECK-LABEL: define internal i32 @j
+; CHECK-LABEL: define internal i32 @j(
+; CHECK-SAME: ptr [[M:%.*]]) {
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[M]], align 4
+; CHECK-NEXT: ret i32 [[V]]
+;
%v = load i32, ptr %m
ret i32 %v
}
define internal i32 @inalloca(ptr inalloca(i32) %p) {
-; CHECK-LABEL: define internal fastcc i32 @inalloca(ptr %p)
+; CHECK-LABEL: define internal fastcc i32 @inalloca(
+; CHECK-SAME: ptr [[P:%.*]]) unnamed_addr {
+; CHECK-NEXT: [[RV:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[RV]]
+;
%rv = load i32, ptr %p
ret i32 %rv
}
define i32 @inalloca2_caller(ptr inalloca(i32) %p) {
+; CHECK-LABEL: define i32 @inalloca2_caller(
+; CHECK-SAME: ptr inalloca(i32) [[P:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: [[RV:%.*]] = musttail call i32 @inalloca2(ptr inalloca(i32) [[P]])
+; CHECK-NEXT: ret i32 [[RV]]
+;
%rv = musttail call i32 @inalloca2(ptr inalloca(i32) %p)
ret i32 %rv
}
define internal i32 @inalloca2(ptr inalloca(i32) %p) {
; Because of the musttail caller, this inalloca cannot be dropped.
-; CHECK-LABEL: define internal i32 @inalloca2(ptr inalloca(i32) %p)
+; CHECK-LABEL: define internal i32 @inalloca2(
+; CHECK-SAME: ptr inalloca(i32) [[P:%.*]]) unnamed_addr {
+; CHECK-NEXT: [[RV:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[RV]]
+;
%rv = load i32, ptr %p
ret i32 %rv
}
define internal i32 @preallocated(ptr preallocated(i32) %p) {
-; CHECK-LABEL: define internal fastcc i32 @preallocated(ptr %p)
+; CHECK-LABEL: define internal fastcc i32 @preallocated(
+; CHECK-SAME: ptr [[P:%.*]]) unnamed_addr {
+; CHECK-NEXT: [[RV:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[RV]]
+;
%rv = load i32, ptr %p
ret i32 %rv
}
define void @call_things() {
+; CHECK-LABEL: define void @call_things() local_unnamed_addr {
+; CHECK-NEXT: [[M:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call fastcc i32 @f(ptr [[M]])
+; CHECK-NEXT: [[TMP2:%.*]] = call fastcc i32 @g(ptr [[M]])
+; CHECK-NEXT: [[TMP3:%.*]] = call coldcc i32 @h(ptr [[M]])
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 @j(ptr [[M]])
+; CHECK-NEXT: [[ARGS:%.*]] = alloca inalloca i32, align 4
+; CHECK-NEXT: [[TMP5:%.*]] = call fastcc i32 @inalloca(ptr [[ARGS]])
+; CHECK-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave.p0()
+; CHECK-NEXT: [[PAARG:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP7:%.*]] = call fastcc i32 @preallocated(ptr [[PAARG]])
+; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP6]])
+; CHECK-NEXT: ret void
+;
%m = alloca i32
call i32 @f(ptr %m)
call x86_thiscallcc i32 @g(ptr %m)
@@ -65,15 +113,25 @@ define void @call_things() {
call i32 @preallocated(ptr preallocated(i32) %N) ["preallocated"(token %c)]
ret void
}
-; CHECK-LABEL: define void @call_things()
-; CHECK: call fastcc i32 @f
-; CHECK: call fastcc i32 @g
-; CHECK: call coldcc i32 @h
-; CHECK: call i32 @j
-; CHECK: call fastcc i32 @inalloca(ptr %args)
-; CHECK-NOT: llvm.call.preallocated
-; CHECK: call fastcc i32 @preallocated(ptr %paarg)
@llvm.used = appending global [1 x ptr] [
- ptr @j
+ ptr @j
], section "llvm.metadata"
+
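+; Self-referential uses through llvm.objectsize (including via an
+; addrspacecast constant expression) should not block promoting these
+; internal functions to fastcc (a summary of the two tests below).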
+define internal i32 @assume_fastcc() {
+; CHECK-LABEL: define internal fastcc i32 @assume_fastcc() {
+; CHECK-NEXT: [[OBJSIZE:%.*]] = call i32 @llvm.objectsize.i32.p0(ptr @assume_fastcc, i1 false, i1 false, i1 false)
+; CHECK-NEXT: ret i32 [[OBJSIZE]]
+;
+ %objsize = call i32 @llvm.objectsize.i32.p0(ptr @assume_fastcc, i1 false, i1 false, i1 false)
+ ret i32 %objsize
+}
+
+define internal i32 @constexpr_self_user() addrspace(1) {
+; CHECK-LABEL: define internal fastcc i32 @constexpr_self_user() addrspace(1) {
+; CHECK-NEXT: [[OBJSIZE:%.*]] = call i32 @llvm.objectsize.i32.p0(ptr addrspacecast (ptr addrspace(1) @constexpr_self_user to ptr), i1 false, i1 false, i1 false)
+; CHECK-NEXT: ret i32 [[OBJSIZE]]
+;
+ %objsize = call i32 @llvm.objectsize.i32.p0(ptr addrspacecast (ptr addrspace(1) @constexpr_self_user to ptr), i1 false, i1 false, i1 false)
+ ret i32 %objsize
+}
diff --git a/llvm/test/Transforms/InstCombine/fcmp.ll b/llvm/test/Transforms/InstCombine/fcmp.ll
index 119cffd..d94e78c 100644
--- a/llvm/test/Transforms/InstCombine/fcmp.ll
+++ b/llvm/test/Transforms/InstCombine/fcmp.ll
@@ -1812,6 +1812,46 @@ define i1 @fcmp_ule_fsub_const(float %x, float %y) {
ret i1 %cmp
}
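+
+; fcmp (fsub X, Y), 0.0 folds to a direct fcmp of X and Y; the four
+; variants below check which fast-math flags survive depending on
+; whether they were written on the fcmp or on the fsub (a summary of
+; the expected output, not of a documented rule).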
+define i1 @fcmp_ninf_ule_fsub_const(float %x, float %y) {
+; CHECK-LABEL: @fcmp_ninf_ule_fsub_const(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ule float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %fs = fsub float %x, %y
+ %cmp = fcmp ninf ule float %fs, 0.000000e+00
+ ret i1 %cmp
+}
+
+define i1 @fcmp_nnan_ule_fsub_const(float %x, float %y) {
+; CHECK-LABEL: @fcmp_nnan_ule_fsub_const(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp nnan ule float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %fs = fsub float %x, %y
+ %cmp = fcmp nnan ule float %fs, 0.000000e+00
+ ret i1 %cmp
+}
+
+define i1 @fcmp_ule_fsub_ninf_const(float %x, float %y) {
+; CHECK-LABEL: @fcmp_ule_fsub_ninf_const(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ninf ule float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %fs = fsub ninf float %x, %y
+ %cmp = fcmp ule float %fs, 0.000000e+00
+ ret i1 %cmp
+}
+
+define i1 @fcmp_ule_fsub_nnan_const(float %x, float %y) {
+; CHECK-LABEL: @fcmp_ule_fsub_nnan_const(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp nnan ule float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %fs = fsub nnan float %x, %y
+ %cmp = fcmp ule float %fs, 0.000000e+00
+ ret i1 %cmp
+}
+
define i1 @fcmp_ugt_fsub_const(float %x, float %y) {
; CHECK-LABEL: @fcmp_ugt_fsub_const(
; CHECK-NEXT: [[FS:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/freeze-phi.ll b/llvm/test/Transforms/InstCombine/freeze-phi.ll
index cdc9a5e..62bb9dc3 100644
--- a/llvm/test/Transforms/InstCombine/freeze-phi.ll
+++ b/llvm/test/Transforms/InstCombine/freeze-phi.ll
@@ -212,3 +212,31 @@ D:
%y.fr = freeze i32 %y
ret i32 %y.fr
}
+
+; Make sure that the fast-math flags on the phi node are dropped when the freeze gets folded.
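+; The phi below carries ninf while the incoming %arg may be infinity;
+; once the freeze is folded away the flag would make that case poison,
+; so it has to be dropped (my reading of the test, not of the transform
+; documentation).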
+
+define float @pr161524(float noundef %arg) {
+; CHECK-LABEL: @pr161524(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[COND:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[ARG:%.*]], i32 144)
+; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_EXIT:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[FADD:%.*]] = fadd float [[ARG]], 1.000000e+00
+; CHECK-NEXT: br label [[IF_EXIT]]
+; CHECK: if.exit:
+; CHECK-NEXT: [[RET:%.*]] = phi float [ [[FADD]], [[IF_THEN]] ], [ [[ARG]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %cond = tail call i1 @llvm.is.fpclass.f32(float %arg, i32 144)
+ br i1 %cond, label %if.then, label %if.exit
+
+if.then:
+ %fadd = fadd float %arg, 1.0
+ br label %if.exit
+
+if.exit:
+ %ret = phi ninf float [ %fadd, %if.then ], [ %arg, %entry ]
+ %ret.fr = freeze float %ret
+ ret float %ret.fr
+}
diff --git a/llvm/test/Transforms/InstCombine/freeze.ll b/llvm/test/Transforms/InstCombine/freeze.ll
index af5cb0c..ac7d65c 100644
--- a/llvm/test/Transforms/InstCombine/freeze.ll
+++ b/llvm/test/Transforms/InstCombine/freeze.ll
@@ -1464,6 +1464,27 @@ define ptr @freeze_ptrmask_nonnull(ptr %p, i64 noundef %m) {
ret ptr %fr
}
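+
+; freeze of poison may be materialized as any fixed value; InstCombine
+; picks zero, so both selects below collapse to a constant 0.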
+define i64 @pr161492_1(i1 %cond) {
+; CHECK-LABEL: define i64 @pr161492_1(
+; CHECK-SAME: i1 [[COND:%.*]]) {
+; CHECK-NEXT: ret i64 0
+;
+ %fr1 = freeze i64 poison
+ %fr2 = freeze i64 poison
+ %ret = select i1 %cond, i64 %fr1, i64 %fr2
+ ret i64 %ret
+}
+
+define i64 @pr161492_2(i1 %cond) {
+; CHECK-LABEL: define i64 @pr161492_2(
+; CHECK-SAME: i1 [[COND:%.*]]) {
+; CHECK-NEXT: ret i64 0
+;
+ %fr = freeze i64 poison
+ %ret = select i1 %cond, i64 %fr, i64 %fr
+ ret i64 %ret
+}
+
!0 = !{}
!1 = !{i64 4}
!2 = !{i32 0, i32 100}
diff --git a/llvm/test/Transforms/InstCombine/funnel.ll b/llvm/test/Transforms/InstCombine/funnel.ll
index 0e5f046..e573108 100644
--- a/llvm/test/Transforms/InstCombine/funnel.ll
+++ b/llvm/test/Transforms/InstCombine/funnel.ll
@@ -635,3 +635,29 @@ define i32 @test_rotl_and_neg_wrong_mask(i32 %x, i32 %shamt) {
%or = or i32 %shl, %shr
ret i32 %or
}
+
+declare void @use(i16)
+
+; Make sure the reused result does not produce poison.
+
+define i16 @fshl_concat_vector_may_produce_poison(i4 %x, i12 %y) {
+; CHECK-LABEL: @fshl_concat_vector_may_produce_poison(
+; CHECK-NEXT: [[X_FR:%.*]] = freeze i4 [[X:%.*]]
+; CHECK-NEXT: [[ZEXT_X:%.*]] = zext i4 [[X_FR]] to i16
+; CHECK-NEXT: [[SLX:%.*]] = shl nuw i16 [[ZEXT_X]], 12
+; CHECK-NEXT: [[ZEXT_Y:%.*]] = zext i12 [[Y:%.*]] to i16
+; CHECK-NEXT: [[XY:%.*]] = or disjoint i16 [[SLX]], [[ZEXT_Y]]
+; CHECK-NEXT: call void @use(i16 [[XY]])
+; CHECK-NEXT: [[YX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XY]], i16 [[XY]], i16 4)
+; CHECK-NEXT: ret i16 [[YX]]
+;
+ %x.fr = freeze i4 %x
+ %zext.x = zext i4 %x.fr to i16
+ %slx = shl nuw nsw i16 %zext.x, 12
+ %zext.y = zext i12 %y to i16
+ %xy = or disjoint i16 %slx, %zext.y
+ call void @use(i16 %xy)
+ %sly = shl nuw i16 %zext.y, 4
+ %yx = or disjoint i16 %sly, %zext.x
+ ret i16 %yx
+}
diff --git a/llvm/test/Transforms/InstCombine/icmp-clamp.ll b/llvm/test/Transforms/InstCombine/icmp-clamp.ll
new file mode 100644
index 0000000..4866dbf
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-clamp.ll
@@ -0,0 +1,295 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare void @use(i32)
+
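+; The fold exercised here, as read off the expected checks (not a quote
+; of the rule):
+;   smin(smax(x, Lo), Hi) == x  <=>  Lo <= x <= Hi
+;                               <=>  (x - Lo) u< (Hi - Lo + 1)
+; e.g. Lo = -95, Hi = 160 gives (x + 95) u< 256 in the first test.
+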
+define i1 @test_i32_eq(i32 %x) {
+; CHECK-LABEL: define i1 @test_i32_eq(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], 95
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 256
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+define i1 @test_i32_ne(i32 %x) {
+; CHECK-LABEL: define i1 @test_i32_ne(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -161
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], -256
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp ne i32 %v2, %x
+ ret i1 %cmp
+}
+
+define i1 @test_i32_eq_no_add(i32 %x) {
+; CHECK-LABEL: define i1 @test_i32_eq_no_add(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X]], 161
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 0)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+define i1 @test_i32_ne_no_add(i32 %x) {
+; CHECK-LABEL: define i1 @test_i32_ne_no_add(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X]], 160
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 0)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp ne i32 %v2, %x
+ ret i1 %cmp
+}
+
+define i1 @test_unsigned_eq(i32 %x) {
+; CHECK-LABEL: define i1 @test_unsigned_eq(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -10
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 91
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10)
+ %v2 = tail call i32 @llvm.umin.i32(i32 %v1, i32 100)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+define i1 @test_unsigned_ne(i32 %x) {
+; CHECK-LABEL: define i1 @test_unsigned_ne(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -101
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], -91
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10)
+ %v2 = tail call i32 @llvm.umin.i32(i32 %v1, i32 100)
+ %cmp = icmp ne i32 %v2, %x
+ ret i1 %cmp
+}
+
+
+; Different bit widths
+define i1 @test_i8_eq(i8 %x) {
+; CHECK-LABEL: define i1 @test_i8_eq(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], 50
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[TMP1]], 101
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i8 @llvm.smax.i8(i8 %x, i8 -50)
+ %v2 = tail call i8 @llvm.smin.i8(i8 %v1, i8 50)
+ %cmp = icmp eq i8 %v2, %x
+ ret i1 %cmp
+}
+
+define i1 @test_i16_eq(i16 %x) {
+; CHECK-LABEL: define i1 @test_i16_eq(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add i16 [[X]], 1000
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i16 [[TMP1]], 2001
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i16 @llvm.smax.i16(i16 %x, i16 -1000)
+ %v2 = tail call i16 @llvm.smin.i16(i16 %v1, i16 1000)
+ %cmp = icmp eq i16 %v2, %x
+ ret i1 %cmp
+}
+
+define i1 @test_i64_eq(i64 %x) {
+; CHECK-LABEL: define i1 @test_i64_eq(
+; CHECK-SAME: i64 [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[X]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP1]], -1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i64 @llvm.smax.i64(i64 %x, i64 -1)
+ %v2 = tail call i64 @llvm.smin.i64(i64 %v1, i64 9223372036854775806)
+ %cmp = icmp eq i64 %v2, %x
+ ret i1 %cmp
+}
+
+; Negative tests - wrong predicate
+define i1 @test_wrong_pred_slt(i32 %x) {
+; CHECK-LABEL: define i1 @test_wrong_pred_slt(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 160
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp slt i32 %v2, %x
+ ret i1 %cmp
+}
+
+
+; Negative tests - not a clamp pattern
+define i1 @test_not_clamp_pattern(i32 %x, i32 %y) {
+; CHECK-LABEL: define i1 @test_not_clamp_pattern(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[Y]], i32 -95)
+; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %y, i32 -95)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+; Negative tests - Lo >= Hi
+define i1 @test_invalid_range(i32 %x) {
+; CHECK-LABEL: define i1 @test_invalid_range(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 50
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 100)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 50)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+; Negative tests - Lo is minimum signed value
+define i1 @test_lo_min_signed(i32 %x) {
+; CHECK-LABEL: define i1 @test_lo_min_signed(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X]], 161
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -2147483648)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+; Negative tests - Hi is maximum signed value
+define i1 @test_hi_max_signed(i32 %x) {
+; CHECK-LABEL: define i1 @test_hi_max_signed(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], -96
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 2147483647)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+; Negative tests - Hi is maximum unsigned value
+define i1 @test_hi_max_unsigned(i32 %x) {
+; CHECK-LABEL: define i1 @test_hi_max_unsigned(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X]], 9
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10)
+ %v2 = tail call i32 @llvm.umin.i32(i32 %v1, i32 4294967295)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+; Multi-use tests - multiple uses of max
+define i1 @test_multi_use_max(i32 %x) {
+; CHECK-LABEL: define i1 @test_multi_use_max(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[X]], i32 -95)
+; CHECK-NEXT: call void @use(i32 [[V1]])
+; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95)
+ call void @use(i32 %v1)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+; Multi-use tests - multiple uses of min
+define i1 @test_multi_use_min(i32 %x) {
+; CHECK-LABEL: define i1 @test_multi_use_min(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[X]], i32 -95)
+; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160)
+; CHECK-NEXT: call void @use(i32 [[V2]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ call void @use(i32 %v2)
+ %cmp = icmp eq i32 %v2, %x
+ ret i1 %cmp
+}
+
+; Commuted tests
+define i1 @test_commuted_eq(i32 %x) {
+; CHECK-LABEL: define i1 @test_commuted_eq(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], 95
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 256
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95)
+ %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160)
+ %cmp = icmp eq i32 %x, %v2
+ ret i1 %cmp
+}
+
+
+; Vector tests - splat constants
+define <2 x i1> @test_vec_splat_eq(<2 x i32> %x) {
+; CHECK-LABEL: define <2 x i1> @test_vec_splat_eq(
+; CHECK-SAME: <2 x i32> [[X:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[X]], splat (i32 50)
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i32> [[TMP1]], splat (i32 101)
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> <i32 -50, i32 -50>)
+ %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> <i32 50, i32 50>)
+ %cmp = icmp eq <2 x i32> %v2, %x
+ ret <2 x i1> %cmp
+}
+
+; Vector tests - poison elements
+define <2 x i1> @test_vec_poison_eq(<2 x i32> %x) {
+; CHECK-LABEL: define <2 x i1> @test_vec_poison_eq(
+; CHECK-SAME: <2 x i32> [[X:%.*]]) {
+; CHECK-NEXT: [[V1:%.*]] = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> [[X]], <2 x i32> <i32 -50, i32 poison>)
+; CHECK-NEXT: [[V2:%.*]] = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[V1]], <2 x i32> <i32 50, i32 poison>)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[V2]], [[X]]
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> <i32 -50, i32 poison>)
+ %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> <i32 50, i32 poison>)
+ %cmp = icmp eq <2 x i32> %v2, %x
+ ret <2 x i1> %cmp
+}
+
+; Vector tests - non-splat
+define <2 x i1> @test_vec_non_splat_eq(<2 x i32> %x) {
+; CHECK-LABEL: define <2 x i1> @test_vec_non_splat_eq(
+; CHECK-SAME: <2 x i32> [[X:%.*]]) {
+; CHECK-NEXT: [[V1:%.*]] = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> [[X]], <2 x i32> <i32 -50, i32 -30>)
+; CHECK-NEXT: [[V2:%.*]] = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[V1]], <2 x i32> <i32 50, i32 70>)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[V2]], [[X]]
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> <i32 -50, i32 -30>)
+ %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> <i32 50, i32 70>)
+ %cmp = icmp eq <2 x i32> %v2, %x
+ ret <2 x i1> %cmp
+}
diff --git a/llvm/test/Transforms/InstCombine/in-freeze-phi.ll b/llvm/test/Transforms/InstCombine/in-freeze-phi.ll
new file mode 100644
index 0000000..917d81b
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/in-freeze-phi.ll
@@ -0,0 +1,274 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=instcombine -S < %s | FileCheck %s
+
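+; These tests pin down how InstCombine materializes freeze(i32 undef)
+; feeding a phi: the frozen value may be chosen to match the other
+; incoming constants, folding the phi entirely, and otherwise defaults
+; to zero (a summary of the checks below, not of a documented rule).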
+define i32 @phi_freeze_same_consts(i1 %c0, i1 %c1) {
+; CHECK-LABEL: define i32 @phi_freeze_same_consts(
+; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]]
+; CHECK: [[BB_FREEZE]]:
+; CHECK-NEXT: br label %[[FINAL:.*]]
+; CHECK: [[BB_OTHER]]:
+; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]]
+; CHECK: [[CA]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[FINAL]]:
+; CHECK-NEXT: ret i32 42
+;
+entry:
+ br i1 %c0, label %bb_freeze, label %bb_other
+
+bb_freeze:
+ %f = freeze i32 undef
+ br label %final
+
+bb_other:
+ br i1 %c1, label %cA, label %cB
+cA:
+ br label %final
+cB:
+ br label %final
+
+final:
+ %phi = phi i32 [ %f, %bb_freeze ], [ 42, %cA ], [ 42, %cB ]
+ ret i32 %phi
+}
+
+define i32 @phi_freeze_mixed_consts(i1 %c0, i1 %c1) {
+; CHECK-LABEL: define i32 @phi_freeze_mixed_consts(
+; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]]
+; CHECK: [[BB_FREEZE]]:
+; CHECK-NEXT: br label %[[FINAL:.*]]
+; CHECK: [[BB_OTHER]]:
+; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]]
+; CHECK: [[CA]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[FINAL]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, %[[BB_FREEZE]] ], [ 42, %[[CA]] ], [ 7, %[[CB]] ]
+; CHECK-NEXT: ret i32 [[PHI]]
+;
+entry:
+ br i1 %c0, label %bb_freeze, label %bb_other
+
+bb_freeze:
+ %f = freeze i32 undef
+ br label %final
+
+bb_other:
+ br i1 %c1, label %cA, label %cB
+cA:
+ br label %final
+cB:
+ br label %final
+
+final:
+ %phi = phi i32 [ %f, %bb_freeze ], [ 42, %cA ], [ 7, %cB ]
+ ret i32 %phi
+}
+
+define i32 @phi_freeze_with_nonconst_incoming(i32 %x, i1 %c0, i1 %c1) {
+; CHECK-LABEL: define i32 @phi_freeze_with_nonconst_incoming(
+; CHECK-SAME: i32 [[X:%.*]], i1 [[C0:%.*]], i1 [[C1:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]]
+; CHECK: [[BB_FREEZE]]:
+; CHECK-NEXT: br label %[[FINAL:.*]]
+; CHECK: [[BB_OTHER]]:
+; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]]
+; CHECK: [[CA]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[FINAL]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, %[[BB_FREEZE]] ], [ [[X]], %[[CA]] ], [ 13, %[[CB]] ]
+; CHECK-NEXT: ret i32 [[PHI]]
+;
+entry:
+ br i1 %c0, label %bb_freeze, label %bb_other
+
+bb_freeze:
+ %f = freeze i32 undef
+ br label %final
+
+bb_other:
+ br i1 %c1, label %cA, label %cB
+cA:
+ br label %final
+cB:
+ br label %final
+
+final:
+ %phi = phi i32 [ %f, %bb_freeze ], [ %x, %cA ], [ 13, %cB ]
+ ret i32 %phi
+}
+
+define <4 x i8> @phi_freeze_vector(i1 %c0, i1 %c1) {
+; CHECK-LABEL: define <4 x i8> @phi_freeze_vector(
+; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]]
+; CHECK: [[BB_FREEZE]]:
+; CHECK-NEXT: br label %[[FINAL:.*]]
+; CHECK: [[BB_OTHER]]:
+; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]]
+; CHECK: [[CA]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[FINAL]]:
+; CHECK-NEXT: ret <4 x i8> splat (i8 9)
+;
+entry:
+ br i1 %c0, label %bb_freeze, label %bb_other
+
+bb_freeze:
+ %f = freeze <4 x i8> undef
+ br label %final
+
+bb_other:
+ br i1 %c1, label %cA, label %cB
+
+cA:
+ br label %final
+
+cB:
+ br label %final
+
+final:
+ %phi = phi <4 x i8> [ %f, %bb_freeze ],
+ [<i8 9, i8 9, i8 9, i8 9>, %cA ],
+ [<i8 9, i8 9, i8 9, i8 9>, %cB ]
+ ret <4 x i8> %phi
+}
+
+define i32 @multi_use_one_folds_one_not_zero(i1 %c0, i1 %c1, i1 %c2) {
+; CHECK-LABEL: define i32 @multi_use_one_folds_one_not_zero(
+; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]], i1 [[C2:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[C0]], label %[[BB_OTHER3:.*]], label %[[CC1:.*]]
+; CHECK: [[BB_OTHER3]]:
+; CHECK-NEXT: br label %[[MID:.*]]
+; CHECK: [[CC1]]:
+; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]]
+; CHECK: [[CA]]:
+; CHECK-NEXT: br label %[[MID]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: br label %[[MID]]
+; CHECK: [[MID]]:
+; CHECK-NEXT: [[PHI_FOLD:%.*]] = phi i32 [ 0, %[[BB_OTHER3]] ], [ 1, %[[CA]] ], [ 1, %[[CB]] ]
+; CHECK-NEXT: br i1 [[C2]], label %[[BB_FREEZE2:.*]], label %[[CD:.*]]
+; CHECK: [[BB_FREEZE2]]:
+; CHECK-NEXT: br label %[[FINAL:.*]]
+; CHECK: [[BB_OTHER2:.*:]]
+; CHECK-NEXT: br i1 true, label %[[CA]], label %[[CB]]
+; CHECK: [[CC:.*:]]
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[CD]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[FINAL]]:
+; CHECK-NEXT: ret i32 [[PHI_FOLD]]
+;
+entry:
+ %f = freeze i32 undef
+ br i1 %c0, label %bb_freeze, label %bb_other
+bb_freeze:
+ br label %mid
+bb_other:
+ br i1 %c1, label %cA, label %cB
+cA:
+ br label %mid
+cB:
+ br label %mid
+mid:
+ %phi_no_fold = phi i32 [ %f, %bb_freeze ], [ 1, %cA ], [ 1, %cB ]
+ br i1 %c2, label %bb_freeze2, label %cD
+bb_freeze2:
+ br label %final
+bb_other2:
+ br i1 %c1, label %cA, label %cB
+cC:
+ br label %final
+cD:
+ br label %final
+final:
+ %phi_fold = phi i32 [ %f, %bb_freeze2 ], [ 0, %cC ], [ 0, %cD ]
+ %a = add i32 %phi_fold, %phi_no_fold
+ ret i32 %a
+}
+
+define i32 @phi_freeze_poison(i1 %c0, i1 %c1) {
+; CHECK-LABEL: define i32 @phi_freeze_poison(
+; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]]
+; CHECK: [[BB_FREEZE]]:
+; CHECK-NEXT: br label %[[FINAL:.*]]
+; CHECK: [[BB_OTHER]]:
+; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]]
+; CHECK: [[CA]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[FINAL]]:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br i1 %c0, label %bb_freeze, label %bb_other
+
+bb_freeze:
+ %f = freeze i32 undef
+ br label %final
+
+bb_other:
+ br i1 %c1, label %cA, label %cB
+cA:
+ br label %final
+cB:
+ br label %final
+
+final:
+ %phi = phi i32 [ %f, %bb_freeze ], [ poison, %cA ], [ poison, %cB ]
+ ret i32 %phi
+}
+
+define <2 x i32> @phi_freeze_poison_vec(i1 %c0, i1 %c1) {
+; CHECK-LABEL: define <2 x i32> @phi_freeze_poison_vec(
+; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]]
+; CHECK: [[BB_FREEZE]]:
+; CHECK-NEXT: br label %[[FINAL:.*]]
+; CHECK: [[BB_OTHER]]:
+; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]]
+; CHECK: [[CA]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: br label %[[FINAL]]
+; CHECK: [[FINAL]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi <2 x i32> [ zeroinitializer, %[[BB_FREEZE]] ], [ <i32 poison, i32 1>, %[[CA]] ], [ <i32 poison, i32 1>, %[[CB]] ]
+; CHECK-NEXT: ret <2 x i32> [[PHI]]
+;
+entry:
+ br i1 %c0, label %bb_freeze, label %bb_other
+
+bb_freeze:
+ %f = freeze <2 x i32> undef
+ br label %final
+
+bb_other:
+ br i1 %c1, label %cA, label %cB
+cA:
+ br label %final
+cB:
+ br label %final
+
+final:
+ %phi = phi <2 x i32> [ %f, %bb_freeze ], [ <i32 poison, i32 1>, %cA ], [ <i32 poison, i32 1>, %cB ]
+ ret <2 x i32> %phi
+}
diff --git a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll
new file mode 100644
index 0000000..14a4c95
--- /dev/null
+++ b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 6
+; RUN: opt -passes=loop-idiom -S %s | FileCheck %s
+
+target datalayout = "p:16:16"
+
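+; With 16-bit pointers the CRC table indexer must be emitted as a
+; 16-bit GEP index (zext i8 to i16 below) instead of the i64 used under
+; the default datalayout.
+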
+;.
+; CHECK: @.crctable = private constant [256 x i32] zeroinitializer
+;.
+define void @test_with_dl() {
+; CHECK-LABEL: define void @test_with_dl() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[PH:.*]]
+; CHECK: [[PH_LOOPEXIT:.*]]:
+; CHECK-NEXT: [[CRC_NEXT_LCSSA:%.*]] = phi i32 [ [[CRC_NEXT3:%.*]], %[[LOOP:.*]] ]
+; CHECK-NEXT: br label %[[PH]]
+; CHECK: [[PH]]:
+; CHECK-NEXT: [[CRC_USE:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[CRC_NEXT_LCSSA]], %[[PH_LOOPEXIT]] ]
+; CHECK-NEXT: br label %[[LOOP]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[CRC2:%.*]] = phi i32 [ 0, %[[PH]] ], [ [[CRC_NEXT3]], %[[LOOP]] ]
+; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i32 [[CRC2]] to i8
+; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i16
+; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i32, ptr @.crctable, i16 [[INDEXER_EXT]]
+; CHECK-NEXT: [[TBL_LD:%.*]] = load i32, ptr [[TBL_PTRADD]], align 4
+; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i32 [[CRC2]], 8
+; CHECK-NEXT: [[CRC_NEXT3]] = xor i32 [[CRC_LE_SHIFT]], [[TBL_LD]]
+; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
+; CHECK-NEXT: [[EXIT_COND1:%.*]] = icmp ne i16 [[IV]], 0
+; CHECK-NEXT: br i1 [[EXIT_COND1]], label %[[LOOP]], label %[[PH_LOOPEXIT]]
+;
+entry:
+ br label %ph
+
+ph:
+ %crc.use = phi i32 [ 0, %entry ], [ %crc.next, %loop ]
+ br label %loop
+
+loop:
+ %iv = phi i16 [ 0, %ph ], [ %iv.next, %loop ]
+ %crc = phi i32 [ 0, %ph ], [ %crc.next, %loop ]
+ %lshr.crc.1 = lshr i32 %crc, 1
+ %crc.and.1 = and i32 %crc, 1
+ %sb.check = icmp eq i32 %crc.and.1, 0
+ %xor = xor i32 %lshr.crc.1, 0
+ %crc.next = select i1 %sb.check, i32 %lshr.crc.1, i32 %xor
+ %iv.next = add i16 %iv, 1
+ %exit.cond = icmp ult i16 %iv, 7
+ br i1 %exit.cond, label %loop, label %ph
+}
diff --git a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll
index 51dc142..b2ec53c 100644
--- a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll
+++ b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll
@@ -118,8 +118,8 @@ define i16 @crc16.le.tc16(i16 %msg, i16 %checksum) {
; CHECK-NEXT: [[IV_INDEXER:%.*]] = zext i8 [[IV_BITS]] to i16
; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i16 [[MSG]], [[IV_INDEXER]]
; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i16 [[DATA_INDEXER]], [[CRC2]]
-; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i16 [[CRC_DATA_INDEXER]], 255
-; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_LO]] to i64
+; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i16 [[CRC_DATA_INDEXER]] to i8
+; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64
; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.2, i64 [[INDEXER_EXT]]
; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2
; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i16 [[CRC2]], 8
@@ -166,8 +166,8 @@ define i8 @crc8.le.tc16(i16 %msg, i8 %checksum) {
; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i16 [[MSG]], [[IV_INDEXER]]
; CHECK-NEXT: [[CRC_INDEXER_CAST:%.*]] = zext i8 [[CRC2]] to i16
; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i16 [[DATA_INDEXER]], [[CRC_INDEXER_CAST]]
-; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i16 [[CRC_DATA_INDEXER]], 255
-; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_LO]] to i64
+; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i16 [[CRC_DATA_INDEXER]] to i8
+; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64
; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i8, ptr @.crctable.3, i64 [[INDEXER_EXT]]
; CHECK-NEXT: [[TBL_LD]] = load i8, ptr [[TBL_PTRADD]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i8 [[IV]], 1
@@ -212,8 +212,8 @@ define i16 @crc16.be.tc8.crc.init.li(i16 %checksum, i8 %msg) {
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 [[CRC2]], 8
-; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255
-; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64
+; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8
+; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64
; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.4, i64 [[INDEXER_EXT]]
; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2
; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8
@@ -255,8 +255,8 @@ define i16 @crc16.be.tc8.crc.init.arg(i16 %crc.init) {
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 [[CRC2]], 8
-; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255
-; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64
+; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8
+; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64
; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.5, i64 [[INDEXER_EXT]]
; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2
; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8
@@ -295,8 +295,8 @@ define i16 @crc16.be.tc8.crc.init.arg.flipped.sb.check(i16 %crc.init) {
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 [[CRC2]], 8
-; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255
-; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64
+; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8
+; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64
; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.6, i64 [[INDEXER_EXT]]
; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2
; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8
@@ -406,8 +406,8 @@ define i32 @crc32.le.tc8.data32(i32 %checksum, i32 %msg) {
; CHECK-NEXT: [[IV_INDEXER:%.*]] = zext i8 [[IV_BITS]] to i32
; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i32 [[MSG]], [[IV_INDEXER]]
; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i32 [[DATA_INDEXER]], [[CRC2]]
-; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i32 [[CRC_DATA_INDEXER]], 255
-; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i32 [[INDEXER_LO]] to i64
+; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i32 [[CRC_DATA_INDEXER]] to i8
+; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64
; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i32, ptr @.crctable.8, i64 [[INDEXER_EXT]]
; CHECK-NEXT: [[TBL_LD:%.*]] = load i32, ptr [[TBL_PTRADD]], align 4
; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i32 [[CRC2]], 8
diff --git a/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll b/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll
new file mode 100644
index 0000000..1339afe
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll
@@ -0,0 +1,75 @@
+; Test branch weight metadata, estimated trip count metadata, and block
+; frequencies after loop peeling.
+
+; RUN: opt < %s -S -passes='print<block-freq>' 2>&1 | \
+; RUN: FileCheck -check-prefix=CHECK %s
+
+; The -implicit-check-not options make sure that no additional labels or calls
+; to @f show up.
+; RUN: opt < %s -S -passes='loop-unroll,print<block-freq>' \
+; RUN: -unroll-force-peel-count=2 2>&1 | \
+; RUN: FileCheck %s -check-prefix=CHECK-UR \
+; RUN: -implicit-check-not='{{^[^ ;]*:}}' \
+; RUN: -implicit-check-not='call void @f'
+
+; CHECK: block-frequency-info: test
+; CHECK: do.body: float = 10.0,
+
+; The peeled copies and the remaining loop should still sum to ~10.
+;
+; CHECK-UR: block-frequency-info: test
+; CHECK-UR: - [[DO_BODY_PEEL:.*]]: float = 1.0,
+; CHECK-UR: - [[DO_BODY_PEEL2:.*]]: float = 0.9,
+; CHECK-UR: - [[DO_BODY:.*]]: float = 8.1,
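+;
+; With branch_weights 1:9 each iteration exits with probability 1/10,
+; so the estimated trip count is 10: the first peeled copy always runs
+; (1.0), the second runs with probability 9/10 (0.9), and the remaining
+; loop accounts for the rest (10 - 1 - 0.9 = 8.1).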
+
+declare void @f(i32)
+
+define void @test(i32 %n) {
+; CHECK-UR-LABEL: define void @test(
+; CHECK-UR: [[ENTRY:.*]]:
+; CHECK-UR: br label %[[DO_BODY_PEEL_BEGIN:.*]]
+; CHECK-UR: [[DO_BODY_PEEL_BEGIN]]:
+; CHECK-UR: br label %[[DO_BODY_PEEL:.*]]
+; CHECK-UR: [[DO_BODY_PEEL]]:
+; CHECK-UR: call void @f
+; CHECK-UR: br i1 %{{.*}}, label %[[DO_END:.*]], label %[[DO_BODY_PEEL_NEXT:.*]], !prof ![[#PROF:]]
+; CHECK-UR: [[DO_BODY_PEEL_NEXT]]:
+; CHECK-UR: br label %[[DO_BODY_PEEL2:.*]]
+; CHECK-UR: [[DO_BODY_PEEL2]]:
+; CHECK-UR: call void @f
+; CHECK-UR: br i1 %{{.*}}, label %[[DO_END]], label %[[DO_BODY_PEEL_NEXT1:.*]], !prof ![[#PROF]]
+; CHECK-UR: [[DO_BODY_PEEL_NEXT1]]:
+; CHECK-UR: br label %[[DO_BODY_PEEL_NEXT5:.*]]
+; CHECK-UR: [[DO_BODY_PEEL_NEXT5]]:
+; CHECK-UR: br label %[[ENTRY_PEEL_NEWPH:.*]]
+; CHECK-UR: [[ENTRY_PEEL_NEWPH]]:
+; CHECK-UR: br label %[[DO_BODY]]
+; CHECK-UR: [[DO_BODY]]:
+; CHECK-UR: call void @f
+; CHECK-UR: br i1 %{{.*}}, label %[[DO_END_LOOPEXIT:.*]], label %[[DO_BODY]], !prof ![[#PROF]], !llvm.loop ![[#LOOP_UR_LATCH:]]
+; CHECK-UR: [[DO_END_LOOPEXIT]]:
+; CHECK-UR: br label %[[DO_END]]
+; CHECK-UR: [[DO_END]]:
+; CHECK-UR: ret void
+
+entry:
+ br label %do.body
+
+do.body:
+ %i = phi i32 [ 0, %entry ], [ %inc, %do.body ]
+ %inc = add i32 %i, 1
+ call void @f(i32 %i)
+ %c = icmp sge i32 %inc, %n
+ br i1 %c, label %do.end, label %do.body, !prof !0
+
+do.end:
+ ret void
+}
+
+!0 = !{!"branch_weights", i32 1, i32 9}
+
+; CHECK-UR: ![[#PROF]] = !{!"branch_weights", i32 1, i32 9}
+; CHECK-UR: ![[#LOOP_UR_LATCH]] = distinct !{![[#LOOP_UR_LATCH]], ![[#LOOP_UR_PC:]], ![[#LOOP_UR_TC:]], ![[#DISABLE:]]}
+; CHECK-UR: ![[#LOOP_UR_PC]] = !{!"llvm.loop.peeled.count", i32 2}
+; CHECK-UR: ![[#LOOP_UR_TC]] = !{!"llvm.loop.estimated_trip_count", i32 8}
+; CHECK-UR: ![[#DISABLE]] = !{!"llvm.loop.unroll.disable"}
diff --git a/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll b/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll
index c58f8f1..63a0dd4 100644
--- a/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll
+++ b/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll
@@ -15,9 +15,9 @@ define void @test() {
; CHECK: loop.peel:
; CHECK-NEXT: [[X_PEEL:%.*]] = call i32 @get.x()
; CHECK-NEXT: switch i32 [[X_PEEL]], label [[LOOP_LATCH_PEEL:%.*]] [
-; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL]]
-; CHECK-NEXT: i32 1, label [[LOOP_EXIT:%.*]]
-; CHECK-NEXT: i32 2, label [[LOOP_EXIT]]
+; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL]]
+; CHECK-NEXT: i32 1, label [[LOOP_EXIT:%.*]]
+; CHECK-NEXT: i32 2, label [[LOOP_EXIT]]
; CHECK-NEXT: ], !prof [[PROF0:![0-9]+]]
; CHECK: loop.latch.peel:
; CHECK-NEXT: br label [[LOOP_PEEL_NEXT:%.*]]
@@ -26,10 +26,10 @@ define void @test() {
; CHECK: loop.peel2:
; CHECK-NEXT: [[X_PEEL3:%.*]] = call i32 @get.x()
; CHECK-NEXT: switch i32 [[X_PEEL3]], label [[LOOP_LATCH_PEEL4:%.*]] [
-; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL4]]
-; CHECK-NEXT: i32 1, label [[LOOP_EXIT]]
-; CHECK-NEXT: i32 2, label [[LOOP_EXIT]]
-; CHECK-NEXT: ], !prof [[PROF1:![0-9]+]]
+; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL4]]
+; CHECK-NEXT: i32 1, label [[LOOP_EXIT]]
+; CHECK-NEXT: i32 2, label [[LOOP_EXIT]]
+; CHECK-NEXT: ], !prof [[PROF0]]
; CHECK: loop.latch.peel4:
; CHECK-NEXT: br label [[LOOP_PEEL_NEXT1:%.*]]
; CHECK: loop.peel.next1:
@@ -41,31 +41,33 @@ define void @test() {
; CHECK: loop:
; CHECK-NEXT: [[X:%.*]] = call i32 @get.x()
; CHECK-NEXT: switch i32 [[X]], label [[LOOP_LATCH:%.*]] [
-; CHECK-NEXT: i32 0, label [[LOOP_LATCH]]
-; CHECK-NEXT: i32 1, label [[LOOP_EXIT_LOOPEXIT:%.*]]
-; CHECK-NEXT: i32 2, label [[LOOP_EXIT_LOOPEXIT]]
-; CHECK-NEXT: ], !prof [[PROF2:![0-9]+]]
+; CHECK-NEXT: i32 0, label [[LOOP_LATCH]]
+; CHECK-NEXT: i32 1, label [[LOOP_EXIT_LOOPEXIT:%.*]]
+; CHECK-NEXT: i32 2, label [[LOOP_EXIT_LOOPEXIT]]
+; CHECK-NEXT: ], !prof [[PROF0]]
; CHECK: loop.latch:
-; CHECK-NEXT: br label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br label [[LOOP]], !llvm.loop [[LOOP1:![0-9]+]]
; CHECK: loop.exit.loopexit:
; CHECK-NEXT: br label [[LOOP_EXIT]]
; CHECK: loop.exit:
; CHECK-NEXT: ret void
+;
+; DISABLEADV-LABEL: @test(
+; DISABLEADV-NEXT: entry:
+; DISABLEADV-NEXT: br label [[LOOP:%.*]]
+; DISABLEADV: loop:
+; DISABLEADV-NEXT: [[X:%.*]] = call i32 @get.x()
+; DISABLEADV-NEXT: switch i32 [[X]], label [[LOOP_LATCH:%.*]] [
+; DISABLEADV-NEXT: i32 0, label [[LOOP_LATCH]]
+; DISABLEADV-NEXT: i32 1, label [[LOOP_EXIT:%.*]]
+; DISABLEADV-NEXT: i32 2, label [[LOOP_EXIT]]
+; DISABLEADV-NEXT: ], !prof [[PROF0:![0-9]+]]
+; DISABLEADV: loop.latch:
+; DISABLEADV-NEXT: br label [[LOOP]]
+; DISABLEADV: loop.exit:
+; DISABLEADV-NEXT: ret void
+;
-; DISABLEADV-LABEL: @test()
-; DISABLEADV-NEXT: entry:
-; DISABLEADV-NEXT: br label %loop
-; DISABLEADV: loop
-; DISABLEADV-NEXT: %x = call i32 @get.x()
-; DISABLEADV-NEXT: switch i32 %x, label %loop.latch [
-; DISABLEADV-NEXT: i32 0, label %loop.latch
-; DISABLEADV-NEXT: i32 1, label %loop.exit
-; DISABLEADV-NEXT: i32 2, label %loop.exit
-; DISABLEADV-NEXT: ], !prof !0
-; DISABLEADV: loop.latch:
-; DISABLEADV-NEXT: br label %loop
-; DISABLEADV: loop.exit:
-; DISABLEADV-NEXT: ret void
entry:
br label %loop
@@ -89,9 +91,9 @@ loop.exit:
;.
; CHECK: [[PROF0]] = !{!"branch_weights", i32 100, i32 200, i32 20, i32 10}
-; CHECK: [[PROF1]] = !{!"branch_weights", i32 90, i32 180, i32 20, i32 10}
-; CHECK: [[PROF2]] = !{!"branch_weights", i32 80, i32 160, i32 20, i32 10}
-; CHECK: [[LOOP3]] = distinct !{!3, !4, !5}
-; CHECK: [[META4:![0-9]+]] = !{!"llvm.loop.peeled.count", i32 2}
-; CHECK: [[META5:![0-9]+]] = !{!"llvm.loop.unroll.disable"}
+; CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
+; CHECK: [[META2]] = !{!"llvm.loop.peeled.count", i32 2}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.disable"}
+;.
+; DISABLEADV: [[PROF0]] = !{!"branch_weights", i32 100, i32 200, i32 20, i32 10}
;.
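
As context for the branch_weights checks above: a !prof annotation on a terminator lists one weight per successor, in successor order; for a switch that is the default destination first, then each case. The weights are relative frequencies, not probabilities. A minimal standalone sketch (hypothetical function, not from the test) of the shape PROF0 annotates:

  define void @sketch(i32 %x) {
  entry:
    switch i32 %x, label %latch [
      i32 0, label %latch
      i32 1, label %exit
      i32 2, label %exit
    ], !prof !0 ; default: 100, case 0: 200, case 1: 20, case 2: 10
  latch:
    br label %exit
  exit:
    ret void
  }

  !0 = !{!"branch_weights", i32 100, i32 200, i32 20, i32 10}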
diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll
index d91cb5b..e951215 100644
--- a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll
+++ b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll
@@ -15,13 +15,13 @@
; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !16
; CHECK: [[NEXT0]]:
; CHECK: br i1 %c, label %{{.*}}, label %side_exit, !prof !15
-; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !17
+; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !16
; CHECK: [[NEXT1]]:
; CHECK: br i1 %c, label %{{.*}}, label %side_exit, !prof !15
-; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !18
+; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !16
; CHECK: [[NEXT2]]:
; CHECK: br i1 %c, label %{{.*}}, label %side_exit.loopexit, !prof !15
-; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !18
+; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !16, !llvm.loop !17
define i32 @basic(ptr %p, i32 %k, i1 %c) #0 !prof !15 {
entry:
@@ -84,6 +84,7 @@ attributes #1 = { nounwind optsize }
;CHECK: !15 = !{!"branch_weights", i32 1, i32 0}
; These are the weights of the latch and its copies.
;CHECK: !16 = !{!"branch_weights", i32 3001, i32 1001}
-;CHECK: !17 = !{!"branch_weights", i32 2000, i32 1001}
-;CHECK: !18 = !{!"branch_weights", i32 1001, i32 1001}
+;CHECK: !17 = distinct !{!17, !18, !19, {{.*}}}
+;CHECK: !18 = !{!"llvm.loop.peeled.count", i32 4}
+;CHECK: !19 = !{!"llvm.loop.estimated_trip_count", i32 0}
diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll
index 15dce234..dec126f 100644
--- a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll
+++ b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll
@@ -5,7 +5,7 @@
; RUN: opt < %s -S -profile-summary-huge-working-set-size-threshold=9 -debug-only=loop-unroll -passes='require<profile-summary>,function(require<opt-remark-emit>,loop-unroll)' 2>&1 | FileCheck %s --check-prefix=NOPEEL
; REQUIRES: asserts
-; Make sure we use the profile information correctly to peel-off 3 iterations
+; Make sure we use the profile information correctly to peel off 4 iterations
; from the loop, and update the branch weights for the peeled loop properly.
; CHECK: Loop Unroll: F[basic]
@@ -20,11 +20,11 @@
; CHECK-LABEL: @basic
; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !15
; CHECK: [[NEXT0]]:
-; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !16
+; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !15
; CHECK: [[NEXT1]]:
-; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !17
+; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !15
; CHECK: [[NEXT2]]:
-; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !17
+; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !15, !llvm.loop !16
define void @basic(ptr %p, i32 %k) #0 !prof !15 {
entry:
@@ -104,6 +104,7 @@ attributes #1 = { nounwind optsize }
!16 = !{!"branch_weights", i32 3001, i32 1001}
;CHECK: !15 = !{!"branch_weights", i32 3001, i32 1001}
-;CHECK: !16 = !{!"branch_weights", i32 2000, i32 1001}
-;CHECK: !17 = !{!"branch_weights", i32 1001, i32 1001}
+;CHECK: !16 = distinct !{!16, !17, !18, {{.*}}}
+;CHECK: !17 = !{!"llvm.loop.peeled.count", i32 4}
+;CHECK: !18 = !{!"llvm.loop.estimated_trip_count", i32 0}
diff --git a/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll b/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll
index ec71c67..0a3d201 100644
--- a/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll
+++ b/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll
@@ -3,7 +3,7 @@
define i32 @f(i1 %cond1) #0 !prof !0 {
; CHECK-LABEL: define i32 @f
-; CHECK-SAME: (i1 [[COND1:%.*]]) !prof [[PROF0:![0-9]+]] {
+; CHECK-SAME: (i1 [[COND1:%.*]]) {{.*}}{
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP1_PEEL_BEGIN:%.*]]
; CHECK: loop1.peel.begin:
@@ -19,7 +19,7 @@ define i32 @f(i1 %cond1) #0 !prof !0 {
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop1:
; CHECK-NEXT: [[LD:%.*]] = load i64, ptr null, align 8
-; CHECK-NEXT: br i1 [[COND1]], label [[LOOP1]], label [[EXIT1_LOOPEXIT:%.*]], !prof [[PROF2:![0-9]+]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND1]], label [[LOOP1]], label [[EXIT1_LOOPEXIT:%.*]], !prof [[PROF1]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: exit1.loopexit:
; CHECK-NEXT: [[LD_LCSSA_PH:%.*]] = phi i64 [ [[LD]], [[LOOP1]] ]
; CHECK-NEXT: br label [[EXIT1]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll
index 387bb43..2391842 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll
@@ -81,17 +81,6 @@ define void @powi_call(ptr %P) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load double, ptr [[GEP]], align 8
-; CHECK-NEXT: [[POWI:%.*]] = tail call double @llvm.powi.f64.i32(double [[L]], i32 3)
-; CHECK-NEXT: store double [[POWI]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
index 56a4683..6e3d257 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
@@ -33,20 +33,7 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
-; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]]
-; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8
-; CHECK-NEXT: store i8 [[CONV4]], ptr [[P_OUT_TAIL_09]], align 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[P_OUT_TAIL_09]], i64 1
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
@@ -108,20 +95,7 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
-; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]]
-; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8
-; CHECK-NEXT: store i8 [[CONV4]], ptr [[P_OUT_TAIL_09]], align 1
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[P_OUT_TAIL_09]], i64 1
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index e4ee677..6cf11be 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -362,8 +362,9 @@ define void @latch_branch_cost(ptr %dst) {
; PRED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 104
; PRED-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br [[EXIT:label %.*]]
-; PRED: [[SCALAR_PH:.*:]]
+; PRED-NEXT: br label %[[EXIT:.*]]
+; PRED: [[EXIT]]:
+; PRED-NEXT: ret void
;
entry:
br label %loop
@@ -585,8 +586,9 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; PRED-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true
; PRED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br [[EXIT:label %.*]]
-; PRED: [[SCALAR_PH:.*:]]
+; PRED-NEXT: br label %[[EXIT:.*]]
+; PRED: [[EXIT]]:
+; PRED-NEXT: ret void
;
entry:
br label %loop
@@ -609,7 +611,6 @@ exit:
}
define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
-;
; COMMON-LABEL: define void @low_trip_count_fold_tail_scalarized_store(
; COMMON-SAME: ptr [[DST:%.*]]) {
; COMMON-NEXT: [[ENTRY:.*:]]
@@ -659,16 +660,16 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
; COMMON-NEXT: store i8 6, ptr [[TMP6]], align 1
; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE12]]
; COMMON: [[PRED_STORE_CONTINUE12]]:
-; COMMON-NEXT: br i1 false, label %[[PRED_STORE_IF13:.*]], label %[[EXIT:.*]]
+; COMMON-NEXT: br i1 false, label %[[PRED_STORE_IF13:.*]], label %[[EXIT1:.*]]
; COMMON: [[PRED_STORE_IF13]]:
; COMMON-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 7
; COMMON-NEXT: store i8 7, ptr [[TMP7]], align 1
-; COMMON-NEXT: br label %[[EXIT]]
-; COMMON: [[EXIT]]:
-; COMMON-NEXT: br label %[[SCALAR_PH:.*]]
-; COMMON: [[SCALAR_PH]]:
-; COMMON-NEXT: br [[EXIT1:label %.*]]
-; COMMON: [[SCALAR_PH1:.*:]]
+; COMMON-NEXT: br label %[[EXIT1]]
+; COMMON: [[EXIT1]]:
+; COMMON-NEXT: br label %[[SCALAR_PH1:.*]]
+; COMMON: [[SCALAR_PH1]]:
+; COMMON-NEXT: br [[EXIT:label %.*]]
+; COMMON: [[SCALAR_PH:.*:]]
;
entry:
br label %loop
@@ -1160,8 +1161,9 @@ define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) {
; PRED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
; PRED-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br [[EXIT:label %.*]]
-; PRED: [[SCALAR_PH:.*:]]
+; PRED-NEXT: br label %[[EXIT:.*]]
+; PRED: [[EXIT]]:
+; PRED-NEXT: ret void
;
entry:
br label %loop.header
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll
index 1af55e9..71acac2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll
@@ -65,36 +65,6 @@ define void @check_widen_intrinsic_with_nnan(ptr noalias %dst.0, ptr noalias %ds
; CHECK-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds double, ptr [[SRC_1]], i64 [[IV]]
-; CHECK-NEXT: [[L_1:%.*]] = load double, ptr [[GEP_SRC_1]], align 8
-; CHECK-NEXT: [[ABS:%.*]] = tail call nnan double @llvm.fabs.f64(double [[L_1]])
-; CHECK-NEXT: [[C_0:%.*]] = fcmp olt double [[ABS]], 1.000000e+00
-; CHECK-NEXT: br i1 [[C_0]], label %[[THEN:.*]], label %[[ELSE:.*]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[L_2:%.*]] = load double, ptr [[SRC_2]], align 8
-; CHECK-NEXT: [[IV_SUB_1:%.*]] = add nsw i64 [[IV]], -1
-; CHECK-NEXT: [[GEP_IV_SUB_1:%.*]] = getelementptr double, ptr [[DST_0]], i64 [[IV_SUB_1]]
-; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_IV_SUB_1]], align 8
-; CHECK-NEXT: [[C_1:%.*]] = fcmp oeq double [[L_2]], 0.000000e+00
-; CHECK-NEXT: br i1 [[C_1]], label %[[MERGE:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: [[IV_SUB_2:%.*]] = add nsw i64 [[IV]], -1
-; CHECK-NEXT: [[GEP_IV_SUB_2:%.*]] = getelementptr double, ptr [[DST_0]], i64 [[IV_SUB_2]]
-; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_IV_SUB_2]], align 8
-; CHECK-NEXT: br label %[[MERGE]]
-; CHECK: [[MERGE]]:
-; CHECK-NEXT: [[MERGE_IV:%.*]] = phi i64 [ [[IV_SUB_2]], %[[ELSE]] ], [ [[IV_SUB_1]], %[[THEN]] ]
-; CHECK-NEXT: [[GEP_DST_1:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i64 [[MERGE_IV]]
-; CHECK-NEXT: store i32 10, ptr [[GEP_DST_1]], align 4
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
index 890ff1d..4bb8a0e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
@@ -69,20 +69,7 @@ define i32 @test_phi_iterator_invalidation(ptr %A, ptr noalias %B) {
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SEXT:%.*]] = sext i16 [[SCALAR_RECUR]] to i32
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV_NEXT]]
-; CHECK-NEXT: [[FOR_NEXT]] = load i16, ptr [[GEP_A]], align 2
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV_NEXT]]
-; CHECK-NEXT: store i32 [[SEXT]], ptr [[GEP_B]], align 4
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1001
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll
index 32fdc5cd6..56a1abd 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll
@@ -113,3 +113,49 @@ loop:
exit:
ret float %max.next
}
+
+define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) {
+; CHECK-LABEL: define float @test_fmax_and_fmin(
+; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]]
+; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4
+; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]])
+; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ]
+; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ]
+; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]]
+; CHECK-NEXT: ret float [[SUB]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ]
+ %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ]
+ %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv
+ %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv
+ %l.0 = load float, ptr %gep.src.0, align 4
+ %l.1 = load float, ptr %gep.src.1, align 4
+ %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0)
+ %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %sub = fsub float %max.next, %min.next
+ ret float %sub
+}
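
The new test above reduces with llvm.maxnum/llvm.minnum, which follow libm fmax/fmin semantics: if exactly one operand is NaN, the other operand is returned. A standalone sketch of chaining such calls (hypothetical function, not from the test):

  declare float @llvm.maxnum.f32(float, float)

  define float @max3(float %a, float %b, float %c) {
    %m0 = call float @llvm.maxnum.f32(float %a, float %b)
    %m1 = call float @llvm.maxnum.f32(float %m0, float %c)
    ret float %m1
  }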
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll
index db088f8..bfee39ea 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll
@@ -18,21 +18,8 @@ define double @test_reduction_costs() {
; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_1:.*]]
-; CHECK: [[LOOP_1]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_1]] ]
-; CHECK-NEXT: [[R_1:%.*]] = phi double [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[R_1_NEXT:%.*]], %[[LOOP_1]] ]
-; CHECK-NEXT: [[R_2:%.*]] = phi double [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[R_2_NEXT:%.*]], %[[LOOP_1]] ]
-; CHECK-NEXT: [[R_1_NEXT]] = fadd double [[R_1]], 3.000000e+00
-; CHECK-NEXT: [[R_2_NEXT]] = fadd double [[R_2]], 9.000000e+00
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_1]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[R_1_NEXT_LCSSA:%.*]] = phi double [ [[R_1_NEXT]], %[[LOOP_1]] ], [ [[TMP0]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[R_2_NEXT_LCSSA:%.*]] = phi double [ [[R_2_NEXT]], %[[LOOP_1]] ], [ [[TMP1]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[DIV:%.*]] = fmul double [[R_1_NEXT_LCSSA]], [[R_2_NEXT_LCSSA]]
+; CHECK-NEXT: [[DIV:%.*]] = fmul double [[TMP0]], [[TMP1]]
; CHECK-NEXT: ret double [[DIV]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll
index a74c33f..42a1940 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll
@@ -169,22 +169,9 @@ define i64 @int_and_pointer_iv(ptr %start, i32 %N) {
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP5]], i32 2
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[RECUR_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[PTR_IV]], align 4
-; CHECK-NEXT: [[RECUR_NEXT]] = zext i32 [[L]] to i64
-; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 4
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[RECUR_LCSSA:%.*]] = phi i64 [ [[SCALAR_RECUR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RECUR_LCSSA]]
+; CHECK-NEXT: ret i64 [[VECTOR_RECUR_EXTRACT_FOR_PHI]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll
index f1571e6..d80fdd1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll
@@ -51,22 +51,8 @@ define i32 @test_invariant_replicate_region(i32 %x, i1 %c) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[REM_1:%.*]] = urem i32 10, [[X]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i32 [ 0, %[[LOOP_HEADER]] ], [ [[REM_1]], %[[THEN]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 99
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], %[[LOOP_LATCH]] ], [ [[TMP17]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RES_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP17]]
;
entry:
br label %loop.header
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
index dd8bd27..e424649 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll
@@ -474,19 +474,8 @@ define i32 @tc4(ptr noundef readonly captures(none) %tmp) vscale_range(1,16) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_0179:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD]] = add i32 [[SUM_0179]], [[TMP5]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP4]]
;
entry:
br label %for.body
@@ -520,6 +509,7 @@ define i32 @tc4_from_profile(ptr noundef readonly captures(none) %tmp, i64 %N) v
; CHECK-NEXT: [[ADD]] = add i32 [[SUM_0179]], [[TMP0]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]], !prof [[PROF9:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll b/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll
index 80bf956..9f518e4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll
@@ -62,18 +62,8 @@ define i32 @add_reduction_select_operand_constant_but_non_uniform() {
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD2_REASS:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 42, %[[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[ADD2_REASS]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[RDX_NEXT]] = add i32 0, [[RDX]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD2_REASS]], 64
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP3]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
index 544ef5c..a6e0f8a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
@@ -32,14 +32,7 @@ define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
; CHECK: [[EXIT:.*:]]
;
; CHECK-ARMPL-LABEL: define void @sincos_f32(
@@ -112,14 +105,7 @@ define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
; CHECK: [[EXIT:.*:]]
;
; CHECK-ARMPL-LABEL: define void @sincos_f64(
@@ -209,15 +195,6 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly
; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP15]], 1
; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP16]], ptr [[TMP19:%.*]], i32 4, <vscale x 4 x i1> [[TMP14:%.*]])
; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP17]], ptr [[TMP21:%.*]], i32 4, <vscale x 4 x i1> [[TMP14]])
-; CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]]
-; CHECK-ARMPL: [[SCALAR_PH:.*:]]
-; CHECK-ARMPL: [[FOR_BODY:.*:]]
-; CHECK-ARMPL: [[IF_THEN:.*:]]
-; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
; CHECK-ARMPL: [[IF_MERGE:.*:]]
; CHECK-ARMPL: [[FOR_END:.*:]]
;
@@ -277,14 +254,7 @@ define void @modf_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
; CHECK: [[EXIT:.*:]]
;
; CHECK-ARMPL-LABEL: define void @modf_f32(
@@ -357,14 +327,7 @@ define void @modf_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
; CHECK: [[EXIT:.*:]]
;
; CHECK-ARMPL-LABEL: define void @modf_f64(
@@ -441,14 +404,7 @@ define void @sincospi_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
; CHECK: [[EXIT:.*:]]
;
; CHECK-ARMPL-LABEL: define void @sincospi_f32(
@@ -521,14 +477,7 @@ define void @sincospi_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincospi.f64(double [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
; CHECK: [[EXIT:.*:]]
;
; CHECK-ARMPL-LABEL: define void @sincospi_f64(
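
The scalar FOR_BODY checks removed throughout this file all matched the same pattern: a struct-returning math intrinsic followed by two extractvalue instructions. A minimal standalone sketch of that pattern (hypothetical function and pointer names):

  declare { float, float } @llvm.sincos.f32(float)

  define void @sincos_store(float %x, ptr %out_sin, ptr %out_cos) {
    %res = call { float, float } @llvm.sincos.f32(float %x)
    %sin = extractvalue { float, float } %res, 0
    %cos = extractvalue { float, float } %res, 1
    store float %sin, ptr %out_sin, align 4
    store float %cos, ptr %out_cos, align 4
    ret void
  }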
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
index ff3f6e9..56ace54 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
@@ -30,17 +30,6 @@ define void @always_vectorize(ptr %p, i32 %x) {
; DEFAULT-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]]
-; DEFAULT-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -59,17 +48,6 @@ define void @always_vectorize(ptr %p, i32 %x) {
; OPTSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; OPTSIZE: [[SCALAR_PH:.*]]:
-; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; OPTSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]]
-; OPTSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -88,17 +66,6 @@ define void @always_vectorize(ptr %p, i32 %x) {
; MINSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; MINSIZE: [[MIDDLE_BLOCK]]:
; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; MINSIZE: [[SCALAR_PH:.*]]:
-; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; MINSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]]
-; MINSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4
-; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; MINSIZE: [[FOR_COND_CLEANUP]]:
; MINSIZE-NEXT: ret void
;
@@ -390,23 +357,6 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8
-; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]]
-; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1
-; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]]
-; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]]
-; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP72]], 2
-; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]]
-; DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1
-; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -531,23 +481,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; DEFAULT-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
-; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
-; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
-; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]]
-; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]]
-; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2
-; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]]
-; DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]]
-; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1
-; DEFAULT-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -598,23 +531,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; OPTSIZE-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; OPTSIZE: [[SCALAR_PH:.*]]:
-; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; OPTSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
-; OPTSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
-; OPTSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
-; OPTSIZE-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]]
-; OPTSIZE-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]]
-; OPTSIZE-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2
-; OPTSIZE-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]]
-; OPTSIZE-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]]
-; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]]
-; OPTSIZE-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1
-; OPTSIZE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -665,23 +581,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; MINSIZE-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; MINSIZE: [[MIDDLE_BLOCK]]:
; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; MINSIZE: [[SCALAR_PH:.*]]:
-; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; MINSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
-; MINSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
-; MINSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
-; MINSIZE-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]]
-; MINSIZE-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]]
-; MINSIZE-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2
-; MINSIZE-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]]
-; MINSIZE-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]]
-; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]]
-; MINSIZE-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1
-; MINSIZE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15
-; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; MINSIZE: [[FOR_COND_CLEANUP]]:
; MINSIZE-NEXT: ret void
;
@@ -746,23 +645,6 @@ define void @dont_vectorize_with_minsize() {
; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -789,23 +671,6 @@ define void @dont_vectorize_with_minsize() {
; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; OPTSIZE: [[SCALAR_PH:.*]]:
-; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -832,23 +697,6 @@ define void @dont_vectorize_with_minsize() {
; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; MINSIZE: [[MIDDLE_BLOCK]]:
; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; MINSIZE: [[SCALAR_PH:.*]]:
-; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; MINSIZE: [[FOR_COND_CLEANUP]]:
; MINSIZE-NEXT: ret void
;
@@ -913,23 +761,6 @@ define void @vectorization_forced_minsize_reduce_width() {
; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -956,23 +787,6 @@ define void @vectorization_forced_minsize_reduce_width() {
; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; OPTSIZE: [[SCALAR_PH:.*]]:
-; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -999,23 +813,6 @@ define void @vectorization_forced_minsize_reduce_width() {
; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; MINSIZE: [[MIDDLE_BLOCK]]:
; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; MINSIZE: [[SCALAR_PH:.*]]:
-; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; MINSIZE: [[FOR_COND_CLEANUP]]:
; MINSIZE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
index 0086f6e..b033f60 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
@@ -20,22 +20,22 @@ define i32 @red_zext_mul_by_63(ptr %start, ptr %end) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 63)
-; CHECK-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[TMP4]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ]
@@ -48,7 +48,7 @@ define i32 @red_zext_mul_by_63(ptr %start, ptr %end) {
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]]
;
entry:
@@ -86,17 +86,17 @@ define i32 @red_zext_mul_by_255(ptr %start, ptr %end) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 255)
-; CHECK-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[TMP4]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -218,22 +218,22 @@ define i32 @red_sext_mul_by_63(ptr %start, ptr %end) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 63)
-; CHECK-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[TMP4]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ]
@@ -246,7 +246,7 @@ define i32 @red_sext_mul_by_63(ptr %start, ptr %end) {
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
index 24375dd..dd239c0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
@@ -28,7 +28,8 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.exit:
+; CHECK-NEXT: ret i32 [[TMP11]]
;
entry:
br label %for.body
@@ -80,7 +81,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[IV_NEXT]]
@@ -111,7 +112,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[TMP13]] = add <4 x i32> [[TMP14]], [[VEC_PHI9]]
; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX9]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC5]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP13]])
; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC5]]
@@ -135,7 +136,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[CMP_IV_NEG:%.*]] = icmp ugt i64 [[IV_NEG]], 0
; CHECK-NEXT: [[CMP_IV:%.*]] = icmp ne i64 [[ACCUM1]], -1
; CHECK-NEXT: [[EXITCOND:%.*]] = and i1 [[CMP_IV_NEG]], [[CMP_IV]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: while.end.loopexit:
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ [[ADD]], [[WHILE_BODY1]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP15]], [[VEC_EPILOG_MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret void
@@ -494,11 +495,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) {
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret i32 [[TMP182]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
index 43fccdc..49e9989 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
@@ -261,7 +261,8 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 {
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; CHECK-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.exit:
+; CHECK-NEXT: ret i32 [[TMP13]]
;
; CHECK-NOI8MM-LABEL: define i32 @sudot_neon(
; CHECK-NOI8MM-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR1:[0-9]+]] {
@@ -296,7 +297,8 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 {
; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP13]], [[TMP12]]
; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
; CHECK-NOI8MM-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-NOI8MM: scalar.ph:
+; CHECK-NOI8MM: for.exit:
+; CHECK-NOI8MM-NEXT: ret i32 [[TMP15]]
;
entry:
br label %for.body
@@ -349,12 +351,13 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 {
; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; CHECK-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.exit:
+; CHECK-NEXT: ret i32 [[TMP13]]
;
; CHECK-NOI8MM-LABEL: define i32 @usdot_neon(
; CHECK-NOI8MM-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR1]] {
@@ -384,12 +387,13 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 {
; CHECK-NOI8MM-NEXT: [[TMP13]] = add <16 x i32> [[TMP11]], [[VEC_PHI1]]
; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NOI8MM-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NOI8MM-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-NOI8MM: middle.block:
; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP13]], [[TMP12]]
; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
; CHECK-NOI8MM-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-NOI8MM: scalar.ph:
+; CHECK-NOI8MM: for.exit:
+; CHECK-NOI8MM-NEXT: ret i32 [[TMP15]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
index 410993b..801eb81 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
@@ -30,7 +30,8 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP9]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @dotp(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -65,7 +66,8 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP14]]
;
; CHECK-MAXBW-LABEL: define i32 @dotp(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -90,7 +92,8 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i32 [[TMP9]]
;
entry:
br label %for.body
@@ -196,11 +199,12 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[TMP69]] = add <16 x i32> [[TMP68]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]])
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP71]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_different_types(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -354,12 +358,13 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP138]] = add <16 x i32> [[TMP136]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP139:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP139]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP139]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP138]], [[TMP137]]
; CHECK-INTERLEAVED-NEXT: [[TMP140:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP140]]
;
; CHECK-MAXBW-LABEL: define i32 @not_dotp_different_types(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -442,11 +447,12 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[TMP69]] = add <16 x i32> [[TMP68]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]])
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i32 [[TMP71]]
;
entry:
br label %for.body
@@ -491,11 +497,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP11]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_not_loop_carried(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -517,11 +524,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP11]]
;
; CHECK-MAXBW-LABEL: define i32 @not_dotp_not_loop_carried(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -543,11 +551,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i32 [[TMP11]]
;
entry:
br label %for.body
@@ -594,11 +603,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) {
; CHECK-INTERLEAVE1-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP12]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_not_phi(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
@@ -622,11 +632,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) {
; CHECK-INTERLEAVED-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP12]]
;
; CHECK-MAXBW-LABEL: define i32 @not_dotp_not_phi(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
@@ -650,11 +661,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) {
; CHECK-MAXBW-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i32 [[TMP12]]
;
entry:
br label %for.body
@@ -733,7 +745,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]])
; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]])
@@ -831,7 +843,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP50]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE10]], [[PARTIAL_REDUCE13]]
; CHECK-INTERLEAVED-NEXT: [[TMP52:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
@@ -897,7 +909,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]])
; CHECK-MAXBW-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]])
@@ -1292,11 +1304,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-INTERLEAVE1-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP182]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @dotp_predicated(
; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -1627,11 +1640,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-INTERLEAVED-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP182]]
;
; CHECK-MAXBW-LABEL: define i32 @dotp_predicated(
; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -1962,11 +1976,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-MAXBW-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: br label [[EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: exit:
+; CHECK-MAXBW-NEXT: ret i32 [[TMP182]]
;
entry:
br label %for.body
@@ -2010,12 +2025,14 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: [[RESULT:%.*]] = add i32 [[TMP10]], [[TMP11]]
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[RESULT]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_extend_user(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -2045,13 +2062,15 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP14]] = add <16 x i32> [[TMP12]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP14]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = extractelement <16 x i32> [[TMP10]], i32 15
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: [[RESULT:%.*]] = add i32 [[TMP16]], [[TMP17]]
+; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT]]
;
; CHECK-MAXBW-LABEL: define i32 @not_dotp_extend_user(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -2072,12 +2091,14 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: [[RESULT:%.*]] = add i32 [[TMP10]], [[TMP11]]
+; CHECK-MAXBW-NEXT: ret i32 [[RESULT]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
index 09917fc..6e11e55 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
@@ -501,7 +501,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]])
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP71]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_different_types(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -660,7 +661,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP138]], [[TMP137]]
; CHECK-INTERLEAVED-NEXT: [[TMP142:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP142]]
;
; CHECK-MAXBW-LABEL: define i32 @not_dotp_different_types(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -747,7 +749,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 {
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP138]])
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i32 [[TMP71]]
;
entry:
br label %for.body
@@ -800,7 +803,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP17]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8
@@ -848,7 +851,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nuw i32 [[TMP29]], 8
@@ -890,7 +893,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8
@@ -949,7 +952,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP15]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8
@@ -987,7 +990,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP30]], [[TMP22]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nuw i32 [[TMP27]], 8
@@ -1019,7 +1022,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP19]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8
@@ -1108,7 +1111,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP41]] = add <vscale x 4 x i32> [[TMP40]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP41]])
; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP35]])
@@ -1226,7 +1229,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP81]] = add <vscale x 4 x i32> [[TMP79]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP81]], [[TMP80]]
; CHECK-INTERLEAVED-NEXT: [[TMP83:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
@@ -1296,7 +1299,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI4]], <vscale x 8 x i32> [[TMP73]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP74:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE16]])
; CHECK-MAXBW-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE17]])
@@ -1393,11 +1396,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP22]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @dotp_predicated(
; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -1430,11 +1434,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP22]]
;
; CHECK-MAXBW-LABEL: define i32 @dotp_predicated(
; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -1467,11 +1472,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = xor i1 [[TMP19]], true
-; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: br label [[EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: exit:
+; CHECK-MAXBW-NEXT: ret i32 [[TMP21]]
;
entry:
br label %for.body
@@ -1519,7 +1525,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]])
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
@@ -1566,7 +1572,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP24]], [[TMP23]]
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
@@ -1601,7 +1607,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP24]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP24]])
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
@@ -1660,7 +1666,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP14]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP15]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
@@ -1707,7 +1713,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[VEC_PHI1]], [[TMP23]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP25]], [[TMP24]]
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]])
@@ -1742,7 +1748,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
@@ -1860,7 +1866,7 @@ define void @not_dotp_not_phi2(ptr %matrix, i32 %n) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add i32 [[TMP21]], [[TMP15]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP23]], [[TMP22]]
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
@@ -1972,7 +1978,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2010,7 +2016,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]]
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]])
@@ -2047,7 +2053,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2105,7 +2111,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2143,7 +2149,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]]
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]])
@@ -2180,7 +2186,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2247,7 +2253,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = add <vscale x 2 x i64> [[TMP17]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP18]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
@@ -2301,7 +2307,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP28]] = add <vscale x 2 x i64> [[TMP26]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP28]], [[TMP27]]
; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]])
@@ -2343,7 +2349,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-MAXBW-NEXT: [[TMP20]] = add <vscale x 8 x i64> [[TMP17]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP20]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
@@ -2465,7 +2471,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVE1-NEXT: [[TMP36]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP36]])
; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP33]])
@@ -2565,7 +2571,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
@@ -2665,7 +2671,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
index 1ef5b20..db3166c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
@@ -499,7 +499,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP13]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @zext_add_reduc_i8_i32_predicated(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
@@ -527,7 +528,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP13]]
;
; CHECK-MAXBW-LABEL: define i32 @zext_add_reduc_i8_i32_predicated(
; CHECK-MAXBW-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
@@ -555,7 +557,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 {
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i32 [[TMP12]]
;
entry:
br label %for.body
@@ -674,7 +677,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = sub <16 x i32> [[VEC_PHI]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]])
; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]]
@@ -700,7 +703,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-INTERLEAVED-NEXT: [[TMP7]] = sub <16 x i32> [[VEC_PHI1]], [[TMP5]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]]
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
@@ -726,7 +729,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-MAXBW-NEXT: [[TMP10]] = sub <vscale x 8 x i32> [[VEC_PHI]], [[TMP9]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP10]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
@@ -768,7 +771,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = add <16 x i32> [[TMP3]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]])
; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]]
@@ -794,7 +797,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[TMP5]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]]
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
@@ -820,7 +823,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 {
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
@@ -871,7 +874,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]]
@@ -906,7 +909,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[VEC_PHI2]], [[BROADCAST_SPLAT]]
; CHECK-INTERLEAVED-NEXT: [[TMP21]] = add nuw i32 [[VEC_PHI1]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP21]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]]
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
@@ -942,7 +945,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv16i32(<vscale x 16 x i32> [[TMP11]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]]
@@ -993,7 +996,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP6]] = add <16 x i32> [[VEC_PHI]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP6]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]]
@@ -1028,7 +1031,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP8]] = add <16 x i32> [[VEC_PHI2]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP8]], [[TMP7]]
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
@@ -1064,7 +1067,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
index c4feabe..edf7e28 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
@@ -50,22 +50,9 @@ define i32 @pr70988(ptr %src, i32 %n) {
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP17]], i32 [[TMP18]])
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[INDUC]]
-; CHECK-NEXT: [[TMP22:%.*]] = load ptr, ptr [[GEP]], align 8
-; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
-; CHECK-NEXT: [[TMP24]] = tail call i32 @llvm.smax.i32(i32 [[TMP23]], i32 [[MAX]])
-; CHECK-NEXT: [[INDUC_NEXT]] = add nuw nsw i64 [[INDUC]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDUC_NEXT]], [[UMAX]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[TMP24]], [[LOOP]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RES]]
+; CHECK-NEXT: ret i32 [[RDX_MINMAX]]
;
entry:
%1 = and i32 %n, 15
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
index 0c7dc29..0f82de62 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
@@ -241,42 +241,8 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
; PRED: [[MIDDLE_BLOCK]]:
; PRED-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP41]])
; PRED-NEXT: br label %[[EXIT:.*]]
-; PRED: [[SCALAR_PH:.*]]:
-; PRED-NEXT: br label %[[LOOP:.*]]
-; PRED: [[LOOP]]:
-; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ]
-; PRED-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[TMP52:%.*]] = add i64 [[Y]], 1
-; PRED-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP52]]
-; PRED-NEXT: [[TMP53]] = load i32, ptr [[GEP_1]], align 4
-; PRED-NEXT: [[OR3:%.*]] = or i32 [[SCALAR_RECUR10]], [[X]]
-; PRED-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1
-; PRED-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 1
-; PRED-NEXT: [[TMP54:%.*]] = shl i32 [[OR3]], 1
-; PRED-NEXT: [[TMP55:%.*]] = or i32 [[TMP54]], 2
-; PRED-NEXT: [[SHL19:%.*]] = shl i32 [[X]], 1
-; PRED-NEXT: [[TMP56:%.*]] = or i32 [[SHR]], [[SHL19]]
-; PRED-NEXT: [[TMP57:%.*]] = or i32 [[TMP56]], [[TMP55]]
-; PRED-NEXT: [[TMP58:%.*]] = or i32 [[TMP57]], [[X]]
-; PRED-NEXT: [[OR20:%.*]] = or i32 [[Z]], [[X]]
-; PRED-NEXT: [[NOT:%.*]] = and i32 [[OR20]], 1
-; PRED-NEXT: [[AND:%.*]] = xor i32 [[NOT]], 1
-; PRED-NEXT: [[IDX_EXT_1:%.*]] = zext i32 [[AND]] to i64
-; PRED-NEXT: [[GEP_2:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[IDX_EXT_1]]
-; PRED-NEXT: [[TMP59:%.*]] = load i32, ptr [[GEP_2]], align 4
-; PRED-NEXT: [[SHR24:%.*]] = lshr i32 [[TMP58]], 1
-; PRED-NEXT: [[IDX_EXT_2:%.*]] = zext i32 [[SHR24]] to i64
-; PRED-NEXT: [[GEP_3:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[IDX_EXT_2]]
-; PRED-NEXT: [[TMP60:%.*]] = load i32, ptr [[GEP_3]], align 4
-; PRED-NEXT: [[RED_1:%.*]] = or i32 [[TMP59]], [[SUM_RED]]
-; PRED-NEXT: [[RED_2]] = or i32 [[RED_1]], [[TMP60]]
-; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], [[Y]]
-; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; PRED: [[EXIT]]:
-; PRED-NEXT: [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], %[[LOOP]] ], [ [[TMP44]], %[[MIDDLE_BLOCK]] ]
-; PRED-NEXT: ret i32 [[RED_2_LCSSA]]
+; PRED-NEXT: ret i32 [[TMP44]]
;
entry:
br label %loop
@@ -497,21 +463,8 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; PRED: [[MIDDLE_BLOCK]]:
; PRED-NEXT: [[TMP19:%.*]] = call i16 @llvm.vector.reduce.or.nxv8i16(<vscale x 8 x i16> [[TMP16]])
; PRED-NEXT: br label %[[EXIT:.*]]
-; PRED: [[SCALAR_PH:.*]]:
-; PRED-NEXT: br label %[[LOOP:.*]]
-; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[RED:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; PRED-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; PRED-NEXT: [[DIV:%.*]] = udiv i16 [[L]], [[X]]
-; PRED-NEXT: [[RED_NEXT]] = or i16 [[DIV]], [[RED]]
-; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]]
-; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; PRED: [[EXIT]]:
-; PRED-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i16 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP19]], %[[MIDDLE_BLOCK]] ]
-; PRED-NEXT: ret i16 [[RED_NEXT_LCSSA]]
+; PRED-NEXT: ret i16 [[TMP19]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll
index c15e8d4..ab9b48f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll
@@ -616,6 +616,45 @@ exit:
ret double %red.next
}
+define i32 @test_ptr_iv_load_used_by_other_load(ptr %start, ptr %end) {
+; CHECK-LABEL: define i32 @test_ptr_iv_load_used_by_other_load(
+; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ null, %[[ENTRY]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[IV]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[TMP1]], 0
+; CHECK-NEXT: [[C_EXT:%.*]] = zext i1 [[C]] to i32
+; CHECK-NEXT: [[RED_NEXT]] = or i32 [[RED]], [[C_EXT]]
+; CHECK-NEXT: [[IV_NEXT]] = getelementptr nusw i8, ptr [[IV]], i64 32
+; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV]], [[END]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi i32 [ [[RED]], %[[LOOP]] ]
+; CHECK-NEXT: ret i32 [[RED_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop: ; preds = %loop, %entry
+ %iv = phi ptr [ %iv.next, %loop ], [ null, %entry ]
+ %red = phi i32 [ %red.next, %loop ], [ 0, %entry ]
+ %0 = load ptr, ptr %iv, align 8
+ %1 = load i8, ptr %0, align 8
+ %c = icmp ne i8 %1, 0
+ %c.ext = zext i1 %c to i32
+ %red.next = or i32 %red, %c.ext
+ %iv.next = getelementptr nusw i8, ptr %iv, i64 32
+ %ec = icmp eq ptr %iv, %end
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret i32 %red
+}
+
attributes #0 = { "target-cpu"="neoverse-512tvb" }
!0 = !{!1, !2, i64 0}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
index 885c7904..5072058 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -144,20 +144,8 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
-; CHECK-ORDERED-TF: scalar.ph:
-; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP12]], [[SUM_07]]
-; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]]
+; CHECK-ORDERED-TF-NEXT: ret float [[TMP9]]
;
@@ -390,23 +378,11 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP39]], i64 [[TMP6]])
; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = extractelement <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = xor i1 [[TMP40]], true
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
-; CHECK-ORDERED-TF: scalar.ph:
-; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP42]], [[SUM_07]]
-; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP30]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]]
+; CHECK-ORDERED-TF-NEXT: ret float [[TMP30]]
;
@@ -630,30 +606,12 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
-; CHECK-ORDERED-TF: scalar.ph:
-; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[A2]], [[SCALAR_PH:%.*]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[A1]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP19]], [[ADD_PHI2]]
-; CHECK-ORDERED-TF-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]]
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD2]] = fadd float [[TMP20]], [[ADD_PHI1]]
-; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2
-; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-TF-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-TF-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4
-; CHECK-ORDERED-TF-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4
+; CHECK-ORDERED-TF-NEXT: store float [[TMP16]], ptr [[A]], align 4
+; CHECK-ORDERED-TF-NEXT: store float [[TMP14]], ptr [[ARRAYIDXA]], align 4
; CHECK-ORDERED-TF-NEXT: ret void
;
@@ -863,28 +821,13 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP7]])
; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END_LOOPEXIT:%.*]]
-; CHECK-ORDERED-TF: scalar.ph:
-; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ADD:%.*]] = fadd float [[TMP15]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]]
-; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-ORDERED-TF: for.end.loopexit:
-; CHECK-ORDERED-TF-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ]
+; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP12]], [[FOR_END_LOOPEXIT]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[RES]]
;
@@ -1081,31 +1024,11 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]])
; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
-; CHECK-ORDERED-TF: scalar.ph:
-; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 1.000000e+00, [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP15]], 0.000000e+00
-; CHECK-ORDERED-TF-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; CHECK-ORDERED-TF: if.then:
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-TF-NEXT: br label [[FOR_INC]]
-; CHECK-ORDERED-TF: for.inc:
-; CHECK-ORDERED-TF-NEXT: [[PHI:%.*]] = phi float [ [[TMP16]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]]
-; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-TF-NEXT: ret float [[RDX]]
+; CHECK-ORDERED-TF-NEXT: ret float [[TMP12]]
;
@@ -1245,7 +1168,7 @@ define float @fadd_multiple(ptr noalias nocapture %a, ptr noalias nocapture %b,
; CHECK-ORDERED-TF-NEXT: [[ADD3]] = fadd float [[ADD]], [[TMP1]]
; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: ret float [[RDX]]
@@ -1542,25 +1465,11 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]])
; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = extractelement <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = xor i1 [[TMP54]], true
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
-; CHECK-ORDERED-TF: scalar.ph:
-; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]])
-; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]]
+; CHECK-ORDERED-TF-NEXT: ret float [[TMP44]]
;
@@ -1852,25 +1761,11 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]])
; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = extractelement <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = xor i1 [[TMP54]], true
-; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-ORDERED-TF: middle.block:
; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]]
-; CHECK-ORDERED-TF: scalar.ph:
-; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]])
-; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-ORDERED-TF: for.end:
-; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ]
-; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]]
+; CHECK-ORDERED-TF-NEXT: ret float [[TMP44]]
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
index 4e989c5..3b016f8 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
@@ -129,20 +129,8 @@ define i64 @same_exit_block_pre_inc_use4() {
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP8]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[INDEX]], [[LD1]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -203,21 +191,8 @@ define i64 @loop_contains_safe_call() #1 {
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[SQRT:%.*]] = tail call fast float @llvm.sqrt.f32(float [[LD1]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp fast ult float [[SQRT]], 3.000000e+00
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -365,22 +340,8 @@ define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align(
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LD1]], 1
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[LD2]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index 79fb3fd..c775b44 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -88,16 +88,7 @@ define void @cost_store_i8(ptr %dst) #0 {
; PRED-NEXT: [[TMP12:%.*]] = xor i1 [[TMP14]], true
; PRED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; PRED: middle.block:
-; PRED-NEXT: br label [[EXIT:%.*]]
-; PRED: scalar.ph:
; PRED-NEXT: br label [[LOOP:%.*]]
-; PRED: loop:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
-; PRED-NEXT: store i8 0, ptr [[GEP]], align 1
-; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 100
-; PRED-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; PRED: exit:
; PRED-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
index 3f230b7..e084307 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
@@ -490,8 +490,7 @@ define float @fadd_predicated(ptr noalias nocapture %a, i64 %n) {
; CHECK-ORDERED: %[[MASK:.*]] = select <2 x i1> %0, <2 x float> %[[PHI]], <2 x float> splat (float -0.000000e+00)
; CHECK-ORDERED: %[[RDX]] = call float @llvm.vector.reduce.fadd.v2f32(float %[[RDX_PHI]], <2 x float> %[[MASK]])
; CHECK-ORDERED: for.end:
-; CHECK-ORDERED: %[[RES_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK-ORDERED: ret float %[[RES_PHI]]
+; CHECK-ORDERED: ret float %[[RDX]]
; CHECK-UNORDERED-LABEL: @fadd_predicated
; CHECK-UNORDERED: vector.ph
@@ -507,12 +506,8 @@ define float @fadd_predicated(ptr noalias nocapture %a, i64 %n) {
; CHECK-UNORDERED-NOT: call float @llvm.vector.reduce.fadd
; CHECK-UNORDERED: middle.block
; CHECK-UNORDERED: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.v2f32(float -0.000000e+00, <2 x float> %[[MASK]])
-; CHECK-UNORDERED: for.body
-; CHECK-UNORDERED: %[[LOAD:.*]] = load float, ptr
-; CHECK-UNORDERED: %[[FADD2:.*]] = fadd float {{.*}}, %[[LOAD]]
; CHECK-UNORDERED: for.end
-; CHECK-UNORDERED: %[[SUM:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK-UNORDERED: ret float %[[SUM]]
+; CHECK-UNORDERED: ret float %[[RDX]]
; CHECK-NOT-VECTORIZED-LABEL: @fadd_predicated
; CHECK-NOT-VECTORIZED-NOT: vector.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
index bdbbfdf..9526a84 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll
@@ -31,10 +31,7 @@ define void @struct_return_widen(ptr noalias %in, ptr noalias writeonly %out_a,
; CHECK: [[VECTOR_BODY:.*:]]
; CHECK: [[TMP2:%.*]] = call { <2 x half>, <2 x half> } @fixed_vec_foo(<2 x half> [[WIDE_LOAD:%.*]])
; CHECK: [[TMP3:%.*]] = call { <2 x half>, <2 x half> } @fixed_vec_foo(<2 x half> [[WIDE_LOAD1:%.*]])
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR2:[0-9]+]]
; CHECK: [[EXIT:.*:]]
;
entry:
@@ -82,12 +79,9 @@ define void @struct_return_replicate(ptr noalias %in, ptr noalias writeonly %out
; CHECK: [[ENTRY:.*:]]
; CHECK: [[VECTOR_PH:.*:]]
; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[TMP2:%.*]] = tail call { half, half } @foo(half [[TMP1:%.*]]) #[[ATTR3:[0-9]+]]
-; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR3]]
+; CHECK: [[TMP2:%.*]] = tail call { half, half } @foo(half [[TMP1:%.*]]) #[[ATTR2:[0-9]+]]
+; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR2]]
; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
-; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR3]]
; CHECK: [[EXIT:.*:]]
;
entry:
@@ -162,7 +156,7 @@ define void @struct_return_scalable(ptr noalias %in, ptr noalias writeonly %out_
; CHECK: [[MIDDLE_BLOCK:.*:]]
; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR3]]
+; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR2]]
; CHECK: [[EXIT:.*:]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
index 8d33ccb..bbc0e33 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
@@ -49,7 +49,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
@@ -59,7 +59,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-NEXT: store <8 x i8> splat (i8 1), ptr [[TMP9]], align 1
; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
-; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
@@ -97,7 +97,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK-VF8: vec.epilog.iter.check:
; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8
-; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
; CHECK-VF8: vec.epilog.ph:
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
@@ -107,7 +107,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-VF8-NEXT: store <8 x i8> splat (i8 1), ptr [[TMP9]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
-; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK-VF8: vec.epilog.scalar.ph:
@@ -150,7 +150,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) {
; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP5]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]]
@@ -182,13 +182,13 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP7]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-VF8: middle.block:
; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK-VF8: vec.epilog.iter.check:
; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8
-; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]]
; CHECK-VF8: vec.epilog.ph:
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
@@ -198,7 +198,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
-; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK-VF8: vec.epilog.scalar.ph:
@@ -261,13 +261,13 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) {
; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP7]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 8
@@ -279,7 +279,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) {
; CHECK-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1
; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX4]], 8
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC3]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[N]], [[N_VEC3]]
; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -313,13 +313,13 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP7]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-VF8: middle.block:
; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK-VF8: vec.epilog.iter.check:
; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8
-; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]]
; CHECK-VF8: vec.epilog.ph:
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 8
@@ -331,7 +331,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX4]], 8
; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC3]]
-; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[N]], [[N_VEC3]]
; CHECK-VF8-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -382,14 +382,14 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP6]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[START]], i64 10000
@@ -400,7 +400,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-NEXT: store <8 x i8> zeroinitializer, ptr [[NEXT_GEP2]], align 1
; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 8
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 10000
-; CHECK-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
@@ -433,14 +433,14 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP6]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-VF8-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-VF8-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-VF8: middle.block:
; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK-VF8: vec.epilog.iter.check:
; CHECK-VF8-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8
-; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]]
; CHECK-VF8: vec.epilog.ph:
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[START]], i64 10000
@@ -451,7 +451,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-VF8-NEXT: store <8 x i8> zeroinitializer, ptr [[NEXT_GEP2]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 8
; CHECK-VF8-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 10000
-; CHECK-VF8-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK-VF8: vec.epilog.scalar.ph:
@@ -514,13 +514,13 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-NEXT: store <vscale x 4 x float> [[TMP13]], ptr [[TMP11]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF15:![0-9]+]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[N_MOD_VF4:%.*]] = urem i64 [[N]], 2
@@ -536,7 +536,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-NEXT: store <2 x float> [[TMP20]], ptr [[TMP19]], align 4
; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX6]], 2
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC5]]
-; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[N]], [[N_VEC5]]
; CHECK-NEXT: br i1 [[CMP_N10]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -576,7 +576,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1
; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP11]], ptr [[TMP9]], align 4
; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-VF8-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-VF8: middle.block:
; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]]
@@ -606,18 +606,12 @@ exit:
}
; Loop with vscale-based trip count vscale x 1024.
-; TODO: No epilogue vectorizations should remain when choosing VF = vscale x 4.
define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalias %b) vscale_range(1, 16) #0 {
; CHECK-LABEL: @trip_count_vscale_no_epilogue_iterations(
-; CHECK-NEXT: iter.check:
+; CHECK-NEXT: entry:
; CHECK-NEXT: [[V:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[N:%.*]] = mul nuw nsw i64 [[V]], 1024
-; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
-; CHECK: vector.main.loop.iter.check:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 3
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
@@ -625,7 +619,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2
@@ -644,31 +638,11 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-NEXT: store <vscale x 4 x float> [[TMP13]], ptr [[TMP11]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
-; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
-; CHECK: vec.epilog.ph:
-; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
-; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT7:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX4]]
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x float>, ptr [[TMP18]], align 4
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX4]]
-; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x float>, ptr [[TMP19]], align 4
-; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x float> [[WIDE_LOAD5]], [[WIDE_LOAD6]]
-; CHECK-NEXT: store <2 x float> [[TMP20]], ptr [[TMP19]], align 4
-; CHECK-NEXT: [[INDEX_NEXT7]] = add nuw i64 [[INDEX4]], 2
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT7]], [[N]]
-; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
-; CHECK: vec.epilog.middle.block:
-; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
-; CHECK: vec.epilog.scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]]
+; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
;
@@ -703,7 +677,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia
; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP11]], ptr [[TMP9]], align 4
; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
; CHECK-VF8-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-VF8: middle.block:
; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
index 33b3629..3b0bd87 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
@@ -116,7 +116,8 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP19]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_TAIL_FOLDING: scalar.ph:
+; PREDICATED_TAIL_FOLDING: for.end:
+; PREDICATED_TAIL_FOLDING-NEXT: ret void
;
entry:
%conv = zext i8 %guard to i32
@@ -243,10 +244,11 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_TAIL_FOLDING: scalar.ph:
+; PREDICATED_TAIL_FOLDING: for.end:
+; PREDICATED_TAIL_FOLDING-NEXT: ret void
;
entry:
%conv = zext i8 %guard to i32
@@ -377,10 +379,11 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT4]]
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP6:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_TAIL_FOLDING: scalar.ph:
+; PREDICATED_TAIL_FOLDING: for.end:
+; PREDICATED_TAIL_FOLDING-NEXT: ret void
;
entry:
%conv = zext i8 %guard1 to i32
@@ -537,10 +540,11 @@ define dso_local void @masked_strided_factor4(ptr noalias nocapture readonly %p,
; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]])
; PREDICATED_TAIL_FOLDING-NEXT: [[TMP23:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP8:![0-9]+]]
+; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP5:![0-9]+]]
; PREDICATED_TAIL_FOLDING: middle.block:
; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_TAIL_FOLDING: scalar.ph:
+; PREDICATED_TAIL_FOLDING: for.end:
+; PREDICATED_TAIL_FOLDING-NEXT: ret void
;
entry:
%conv = zext i8 %guard to i32
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
index 16acd3f..b8b4fbd 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll
@@ -69,7 +69,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
index 069d369..cb2c003 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll
@@ -29,7 +29,8 @@ define void @trip1025_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapt
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.end:
+; CHECK-NEXT: ret void
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll
index 61448bd..33ee0d6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll
@@ -33,7 +33,10 @@ define void @cannot_overflow_i32_induction_var(ptr noalias %dst, ptr readonly %s
; CHECK-NEXT: br i1 [[TMP5]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.cond.cleanup.loopexit:
+; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
;
entry:
%cmp6.not = icmp eq i32 %N, 0
@@ -87,10 +90,13 @@ define void @can_overflow_i64_induction_var(ptr noalias %dst, ptr readonly %src,
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP2]])
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
-; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.cond.cleanup.loopexit:
+; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
;
entry:
%cmp6.not = icmp eq i64 %N, 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
index b725669f..b5544dc 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
@@ -36,21 +36,9 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
-; CHECK: while.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: while.end.loopexit:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP19]]
;
; CHECK-IN-LOOP-LABEL: @add_reduction_i32(
; CHECK-IN-LOOP-NEXT: entry:
@@ -81,21 +69,9 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[TMP19:%.*]] = xor i1 [[TMP18]], true
; CHECK-IN-LOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-IN-LOOP: middle.block:
-; CHECK-IN-LOOP-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK-IN-LOOP: scalar.ph:
; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]]
-; CHECK-IN-LOOP: while.body:
-; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
-; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]]
-; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
-; CHECK-IN-LOOP-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
-; CHECK-IN-LOOP-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-IN-LOOP: while.end.loopexit:
-; CHECK-IN-LOOP-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; CHECK-IN-LOOP-NEXT: ret i32 [[RED_NEXT_LCSSA]]
+; CHECK-IN-LOOP-NEXT: ret i32 [[TMP15]]
;
entry:
br label %while.body
@@ -141,23 +117,11 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
-; CHECK: while.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]]
-; CHECK-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4
-; CHECK-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: while.end.loopexit:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]]
+; CHECK-NEXT: ret float [[TMP14]]
;
; CHECK-IN-LOOP-LABEL: @add_reduction_f32(
; CHECK-IN-LOOP-NEXT: entry:
@@ -185,23 +149,11 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-IN-LOOP-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true
-; CHECK-IN-LOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-IN-LOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-IN-LOOP: middle.block:
-; CHECK-IN-LOOP-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK-IN-LOOP: scalar.ph:
; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]]
-; CHECK-IN-LOOP: while.body:
-; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
-; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]]
-; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4
-; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]]
-; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
-; CHECK-IN-LOOP-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
-; CHECK-IN-LOOP-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-IN-LOOP: while.end.loopexit:
-; CHECK-IN-LOOP-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
-; CHECK-IN-LOOP-NEXT: ret float [[RED_NEXT_LCSSA]]
+; CHECK-IN-LOOP-NEXT: ret float [[TMP14]]
;
entry:
br label %while.body
@@ -251,32 +203,12 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP16]], true
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP20]])
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
-; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP26]], 5
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[RDX]], [[TMP27]]
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: for.inc:
-; CHECK-NEXT: [[RES]] = phi i32 [ [[RDX]], [[FOR_BODY]] ], [ [[XOR]], [[IF_THEN]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], [[FOR_INC]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RES_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP25]]
;
; CHECK-IN-LOOP-LABEL: @cond_xor_reduction(
; CHECK-IN-LOOP-NEXT: entry:
@@ -308,31 +240,11 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-IN-LOOP-NEXT: [[TMP22:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-IN-LOOP-NEXT: [[TMP23:%.*]] = xor i1 [[TMP22]], true
-; CHECK-IN-LOOP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-IN-LOOP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-IN-LOOP: middle.block:
-; CHECK-IN-LOOP-NEXT: br label [[FOR_END:%.*]]
-; CHECK-IN-LOOP: scalar.ph:
-; CHECK-IN-LOOP-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK-IN-LOOP: for.body:
-; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
-; CHECK-IN-LOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
-; CHECK-IN-LOOP-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-IN-LOOP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP24]], 5
-; CHECK-IN-LOOP-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; CHECK-IN-LOOP: if.then:
-; CHECK-IN-LOOP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-IN-LOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-IN-LOOP-NEXT: [[XOR:%.*]] = xor i32 [[RDX]], [[TMP25]]
-; CHECK-IN-LOOP-NEXT: br label [[FOR_INC]]
-; CHECK-IN-LOOP: for.inc:
-; CHECK-IN-LOOP-NEXT: [[RES]] = phi i32 [ [[RDX]], [[FOR_BODY]] ], [ [[XOR]], [[IF_THEN]] ]
-; CHECK-IN-LOOP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-IN-LOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-IN-LOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-IN-LOOP-NEXT: br label [[FOR_INC:%.*]]
; CHECK-IN-LOOP: for.end:
-; CHECK-IN-LOOP-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], [[FOR_INC]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
-; CHECK-IN-LOOP-NEXT: ret i32 [[RES_LCSSA]]
+; CHECK-IN-LOOP-NEXT: ret i32 [[TMP19]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
index 1879386..5531b3c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
@@ -72,7 +72,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
@@ -176,10 +177,11 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias %
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP93]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP66:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP67:%.*]] = xor i1 [[TMP66]], true
-; CHECK-NEXT: br i1 [[TMP67]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP67]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
index ec17872..9ebe790 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
@@ -33,7 +33,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
@@ -73,10 +74,11 @@ define void @simple_memset_v4i32(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX1]], i64 [[TMP2]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = xor i1 [[TMP6]], true
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
@@ -120,10 +122,11 @@ define void @simple_memcpy(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP12]], true
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
@@ -180,10 +183,11 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP22:%.*]] = xor i1 [[TMP21]], true
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
@@ -231,10 +235,11 @@ define void @simple_gather_scatter(ptr noalias %dst, ptr noalias %src, ptr noali
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
@@ -284,10 +289,11 @@ define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) #
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP13:%.*]] = xor i1 [[TMP14]], true
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.end:
+; CHECK-NEXT: ret void
;
entry:
@@ -342,10 +348,11 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.end:
+; CHECK-NEXT: ret void
;
entry:
@@ -403,10 +410,11 @@ define void @uniform_store(ptr noalias %dst, ptr noalias readonly %src, i64 %n)
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP12:%.*]] = xor i1 [[TMP13]], true
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.end:
+; CHECK-NEXT: ret void
;
entry:
@@ -454,10 +462,11 @@ define void @simple_fdiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
@@ -509,10 +518,11 @@ define void @simple_idiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-NEXT: [[TMP17:%.*]] = xor i1 [[TMP14]], true
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: while.end.loopexit:
+; CHECK-NEXT: ret void
;
entry:
br label %while.body
@@ -551,7 +561,7 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: store <vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 4
; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
index e7d25a0..742097b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -562,7 +562,8 @@ define void @simple_histogram_tailfold(ptr noalias %buckets, ptr readonly %indic
; CHECK-NEXT: br i1 [[TMP11]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: for.exit:
+; CHECK-NEXT: ret void
;
entry:
br label %for.body
@@ -626,7 +627,7 @@ define void @simple_histogram_rtdepcheck(ptr noalias %buckets, ptr %array, ptr %
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
@@ -719,7 +720,7 @@ define void @simple_histogram_64b(ptr noalias %buckets, ptr readonly %indices, i
; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(<vscale x 2 x ptr> [[TMP6]], i64 1, <vscale x 2 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll
index e6ff39b..6da3c77c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll
@@ -317,19 +317,7 @@ define void @test_v4_v4m(ptr noalias %a, ptr readonly %b) #3 {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8
-; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
@@ -369,19 +357,7 @@ define void @test_v2_v4m(ptr noalias %a, ptr readonly %b) #3 {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8
-; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR2:[0-9]+]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
@@ -421,19 +397,7 @@ define void @test_v2_v4(ptr noalias %a, ptr readonly %b) #3 {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8
-; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR3:[0-9]+]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
index c44db7d..1607755 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
@@ -71,16 +71,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
; DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DATA: middle.block:
-; DATA-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; DATA: scalar.ph:
; DATA-NEXT: br label [[WHILE_BODY:%.*]]
-; DATA: while.body:
-; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; DATA-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
-; DATA-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
-; DATA-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
-; DATA-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
-; DATA-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; DATA: while.end.loopexit:
; DATA-NEXT: ret void
;
@@ -115,16 +106,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_NO_LANEMASK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]]
; DATA_NO_LANEMASK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DATA_NO_LANEMASK: middle.block:
-; DATA_NO_LANEMASK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; DATA_NO_LANEMASK: scalar.ph:
; DATA_NO_LANEMASK-NEXT: br label [[WHILE_BODY:%.*]]
-; DATA_NO_LANEMASK: while.body:
-; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; DATA_NO_LANEMASK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
-; DATA_NO_LANEMASK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
-; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
-; DATA_NO_LANEMASK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
-; DATA_NO_LANEMASK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; DATA_NO_LANEMASK: while.end.loopexit:
; DATA_NO_LANEMASK-NEXT: ret void
;
@@ -150,16 +132,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL-NEXT: [[TMP7:%.*]] = xor i1 [[TMP6]], true
; DATA_AND_CONTROL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DATA_AND_CONTROL: middle.block:
-; DATA_AND_CONTROL-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; DATA_AND_CONTROL: scalar.ph:
; DATA_AND_CONTROL-NEXT: br label [[WHILE_BODY:%.*]]
-; DATA_AND_CONTROL: while.body:
-; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; DATA_AND_CONTROL-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
-; DATA_AND_CONTROL-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
-; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
-; DATA_AND_CONTROL-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
-; DATA_AND_CONTROL-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; DATA_AND_CONTROL: while.end.loopexit:
; DATA_AND_CONTROL-NEXT: ret void
;
@@ -190,16 +163,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP12:%.*]] = xor i1 [[TMP15]], true
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; DATA_AND_CONTROL_NO_RT_CHECK: middle.block:
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]]
-; DATA_AND_CONTROL_NO_RT_CHECK: scalar.ph:
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_BODY:%.*]]
-; DATA_AND_CONTROL_NO_RT_CHECK: while.body:
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]]
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; DATA_AND_CONTROL_NO_RT_CHECK: while.end.loopexit:
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
index 038330b..c261760 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
@@ -22,21 +22,6 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) {
; VF2-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF2: [[MIDDLE_BLOCK]]:
; VF2-NEXT: br label %[[EXIT:.*]]
-; VF2: [[SCALAR_PH:.*]]:
-; VF2-NEXT: br label %[[LOOP:.*]]
-; VF2: [[LOOP]]:
-; VF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF2-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1
-; VF2-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]]
-; VF2-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8
-; VF2-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8
-; VF2-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1
-; VF2-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]]
-; VF2-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8
-; VF2-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8
-; VF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; VF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 2
-; VF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; VF2: [[EXIT]]:
; VF2-NEXT: ret void
;
@@ -86,33 +71,18 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) {
; VF4-NEXT: br i1 false, label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
; VF4: [[PRED_STORE_IF5]]:
; VF4-NEXT: [[TMP27:%.*]] = shl nsw i64 3, 1
-; VF4-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP27]]
-; VF4-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
-; VF4-NEXT: store i64 [[TMP29]], ptr [[TMP28]], align 8
-; VF4-NEXT: [[TMP30:%.*]] = or disjoint i64 [[TMP27]], 1
-; VF4-NEXT: [[TMP31:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP30]]
-; VF4-NEXT: [[TMP32:%.*]] = load i64, ptr [[TMP31]], align 8
-; VF4-NEXT: store i64 [[TMP32]], ptr [[TMP31]], align 8
-; VF4-NEXT: br label %[[PRED_STORE_CONTINUE6]]
-; VF4: [[PRED_STORE_CONTINUE6]]:
-; VF4-NEXT: br label %[[MIDDLE_BLOCK:.*]]
-; VF4: [[MIDDLE_BLOCK]]:
-; VF4-NEXT: br label %[[EXIT:.*]]
-; VF4: [[SCALAR_PH:.*]]:
-; VF4-NEXT: br label %[[LOOP:.*]]
-; VF4: [[LOOP]]:
-; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF4-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1
-; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]]
+; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP27]]
; VF4-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8
; VF4-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8
-; VF4-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1
+; VF4-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[TMP27]], 1
; VF4-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]]
; VF4-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8
; VF4-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8
-; VF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; VF4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 2
-; VF4-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
+; VF4-NEXT: br label %[[PRED_STORE_CONTINUE6]]
+; VF4: [[PRED_STORE_CONTINUE6]]:
+; VF4-NEXT: br label %[[MIDDLE_BLOCK:.*]]
+; VF4: [[MIDDLE_BLOCK]]:
+; VF4-NEXT: br label %[[EXIT:.*]]
; VF4: [[EXIT]]:
; VF4-NEXT: ret void
;
@@ -237,27 +207,6 @@ define void @test_complex_add_float_tc_4(ptr %res, ptr noalias %A, ptr noalias %
; VF2-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VF2: [[MIDDLE_BLOCK]]:
; VF2-NEXT: br label %[[EXIT:.*]]
-; VF2: [[SCALAR_PH:.*]]:
-; VF2-NEXT: br label %[[LOOP:.*]]
-; VF2: [[LOOP]]:
-; VF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF2-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i64 [[IV]]
-; VF2-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i64 [[IV]]
-; VF2-NEXT: [[L_A_0:%.*]] = load float, ptr [[GEP_A_0]], align 4
-; VF2-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_A_0]], i64 4
-; VF2-NEXT: [[L_A_1:%.*]] = load float, ptr [[GEP_A_1]], align 4
-; VF2-NEXT: [[L_B_0:%.*]] = load float, ptr [[GEP_B_0]], align 4
-; VF2-NEXT: [[ADD_0:%.*]] = fadd float [[L_A_0]], [[L_B_0]]
-; VF2-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_B_0]], i64 4
-; VF2-NEXT: [[L_B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
-; VF2-NEXT: [[ADD_1:%.*]] = fadd float [[L_A_1]], [[L_B_1]]
-; VF2-NEXT: [[GEP_RES_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RES]], i64 [[IV]]
-; VF2-NEXT: store float [[ADD_0]], ptr [[GEP_RES_0]], align 4
-; VF2-NEXT: [[GEP_RES_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_RES_0]], i64 4
-; VF2-NEXT: store float [[ADD_1]], ptr [[GEP_RES_1]], align 4
-; VF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; VF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4
-; VF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; VF2: [[EXIT]]:
; VF2-NEXT: ret void
;
@@ -282,27 +231,6 @@ define void @test_complex_add_float_tc_4(ptr %res, ptr noalias %A, ptr noalias %
; VF4-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF4: [[MIDDLE_BLOCK]]:
; VF4-NEXT: br label %[[EXIT:.*]]
-; VF4: [[SCALAR_PH:.*]]:
-; VF4-NEXT: br label %[[LOOP:.*]]
-; VF4: [[LOOP]]:
-; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF4-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i64 [[IV]]
-; VF4-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i64 [[IV]]
-; VF4-NEXT: [[L_A_0:%.*]] = load float, ptr [[GEP_A_0]], align 4
-; VF4-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_A_0]], i64 4
-; VF4-NEXT: [[L_A_1:%.*]] = load float, ptr [[GEP_A_1]], align 4
-; VF4-NEXT: [[L_B_0:%.*]] = load float, ptr [[GEP_B_0]], align 4
-; VF4-NEXT: [[ADD_0:%.*]] = fadd float [[L_A_0]], [[L_B_0]]
-; VF4-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_B_0]], i64 4
-; VF4-NEXT: [[L_B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
-; VF4-NEXT: [[ADD_1:%.*]] = fadd float [[L_A_1]], [[L_B_1]]
-; VF4-NEXT: [[GEP_RES_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RES]], i64 [[IV]]
-; VF4-NEXT: store float [[ADD_0]], ptr [[GEP_RES_0]], align 4
-; VF4-NEXT: [[GEP_RES_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_RES_0]], i64 4
-; VF4-NEXT: store float [[ADD_1]], ptr [[GEP_RES_1]], align 4
-; VF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; VF4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4
-; VF4-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; VF4: [[EXIT]]:
; VF4-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll
index a044ae8..d290f2d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll
@@ -27,21 +27,6 @@ define void @load_store_interleave_group(ptr noalias %data) {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1
-; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]]
-; CHECK-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8
-; CHECK-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8
-; CHECK-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1
-; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]]
-; CHECK-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8
-; CHECK-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -105,25 +90,6 @@ define void @test_2xi64_with_wide_load(ptr noalias %data, ptr noalias %factor) {
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[IV]]
-; CHECK-NEXT: [[L_FACTOR:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[TMP13:%.*]] = shl nsw i64 [[IV]], 1
-; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP13]]
-; CHECK-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8
-; CHECK-NEXT: [[MUL_0:%.*]] = mul i64 [[L_FACTOR]], [[L_0]]
-; CHECK-NEXT: store i64 [[MUL_0]], ptr [[DATA_0]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = or disjoint i64 [[TMP13]], 1
-; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP14]]
-; CHECK-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8
-; CHECK-NEXT: [[MUL_1:%.*]] = mul i64 [[L_FACTOR]], [[L_1]]
-; CHECK-NEXT: store i64 [[MUL_1]], ptr [[DATA_1]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll
index edb9519..187edb5 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll
@@ -49,23 +49,6 @@ define void @test0(ptr noalias %M3, ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_INC1286_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[IF_THEN1165_US:%.*]]
-; CHECK: if.then1165.us:
-; CHECK-NEXT: [[INDVARS_IV1783:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT1784:%.*]], [[IF_THEN1165_US]] ]
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[INDVARS_IV1783]]
-; CHECK-NEXT: [[L_A:%.*]] = load i16, ptr [[GEP_A]], align 2
-; CHECK-NEXT: [[CONV1177_US:%.*]] = zext i16 [[L_A]] to i32
-; CHECK-NEXT: [[ADD1178_US:%.*]] = add nsw i32 [[CONV1177_US]], 10
-; CHECK-NEXT: [[CONV1179_US:%.*]] = trunc i32 [[ADD1178_US]] to i16
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV1783]]
-; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 8
-; CHECK-NEXT: [[IDXPROM1181_US:%.*]] = ashr exact i64 [[L_B]], 32
-; CHECK-NEXT: [[ARRAYIDX1185_US:%.*]] = getelementptr inbounds i16, ptr [[M3]], i64 [[IDXPROM1181_US]]
-; CHECK-NEXT: store i16 [[CONV1179_US]], ptr [[ARRAYIDX1185_US]], align 2
-; CHECK-NEXT: [[INDVARS_IV_NEXT1784]] = add nuw nsw i64 [[INDVARS_IV1783]], 1
-; CHECK-NEXT: [[EXITCOND1785:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1784]], 16
-; CHECK-NEXT: br i1 [[EXITCOND1785]], label [[FOR_INC1286_LOOPEXIT]], label [[IF_THEN1165_US]]
; CHECK: for.inc1286.loopexit:
; CHECK-NEXT: ret void
;
@@ -141,24 +124,6 @@ define void @test1(ptr noalias %M3, ptr noalias %A, ptr noalias %B, ptr noalias
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_INC1286_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[IF_THEN1165_US:%.*]]
-; CHECK: if.then1165.us:
-; CHECK-NEXT: [[INDVARS_IV1783:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT1784:%.*]], [[IF_THEN1165_US]] ]
-; CHECK-NEXT: [[FPTR:%.*]] = load i32, ptr [[C]], align 4
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[INDVARS_IV1783]]
-; CHECK-NEXT: [[L_A:%.*]] = load i16, ptr [[GEP_A]], align 2
-; CHECK-NEXT: [[CONV1177_US:%.*]] = zext i16 [[L_A]] to i32
-; CHECK-NEXT: [[ADD1178_US:%.*]] = add nsw i32 [[CONV1177_US]], [[FPTR]]
-; CHECK-NEXT: [[CONV1179_US:%.*]] = trunc i32 [[ADD1178_US]] to i16
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV1783]]
-; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 8
-; CHECK-NEXT: [[IDXPROM1181_US:%.*]] = ashr exact i64 [[L_B]], 32
-; CHECK-NEXT: [[ARRAYIDX1185_US:%.*]] = getelementptr inbounds i16, ptr [[M3]], i64 [[IDXPROM1181_US]]
-; CHECK-NEXT: store i16 [[CONV1179_US]], ptr [[ARRAYIDX1185_US]], align 2
-; CHECK-NEXT: [[INDVARS_IV_NEXT1784]] = add nuw nsw i64 [[INDVARS_IV1783]], 1
-; CHECK-NEXT: [[EXITCOND1785:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1784]], 16
-; CHECK-NEXT: br i1 [[EXITCOND1785]], label [[FOR_INC1286_LOOPEXIT]], label [[IF_THEN1165_US]]
; CHECK: for.inc1286.loopexit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll b/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll
index c8eecd7..96a25a8 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll
@@ -127,7 +127,8 @@ define void @test(ptr noalias %src, ptr noalias %dst) {
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll b/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll
index d5d0c14..bc9cf4f 100644
--- a/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll
+++ b/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll
@@ -23,11 +23,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) {
; GFX9-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; GFX9-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; GFX9: middle.block:
-; GFX9-NEXT: br label [[FOR_END:%.*]]
-; GFX9: scalar.ph:
; GFX9-NEXT: br label [[FOR_BODY:%.*]]
-; GFX9: for.body:
-; GFX9-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; GFX9: for.end:
; GFX9-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP3]], [[TMP2]]
; GFX9-NEXT: [[ADD_LCSSA:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> [[BIN_RDX]])
@@ -52,11 +48,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) {
; VI-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; VI-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VI: middle.block:
-; VI-NEXT: br label [[FOR_END:%.*]]
-; VI: scalar.ph:
; VI-NEXT: br label [[FOR_BODY:%.*]]
-; VI: for.body:
-; VI-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; VI: for.end:
; VI-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP3]], [[TMP2]]
; VI-NEXT: [[ADD_LCSSA:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> [[BIN_RDX]])
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll b/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll
index e83ac2e..58a24ee 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll
@@ -36,18 +36,6 @@ define void @f0(ptr noalias %dst, ptr readonly %src, i64 %n) #0 {
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[TMP10]], 3
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i8 [[MUL]], ptr [[ARRAYIDX3]], align 1
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[FOR_END_LOOPEXIT]]:
; CHECK-NEXT: br label %[[FOR_END]]
; CHECK: [[FOR_END]]:
@@ -81,7 +69,4 @@ attributes #0 = { nofree norecurse nounwind "target-features"="+armv8.1-m.main,+
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]], [[META5:![0-9]+]]}
-; CHECK: [[META4]] = !{!"llvm.loop.vectorize.width", i32 16}
-; CHECK: [[META5]] = !{!"llvm.loop.interleave.count", i32 2}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
index e52d85c..9a76019 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
@@ -25,21 +25,7 @@ define void @test_stride1_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 1
-; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
-; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]]
-; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]]
-; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -212,21 +198,7 @@ define void @test_stride3_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 3
-; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
-; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]]
-; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]]
-; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -273,21 +245,7 @@ define void @test_stride4_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 4
-; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
-; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]]
-; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]]
-; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]]
; CHECK: end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
index 4cdfcf2..0a4ed7f 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
@@ -22,11 +22,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 [[TMP2]]
;
@@ -75,11 +71,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 [[TMP8]]
;
@@ -126,11 +118,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]]
@@ -177,11 +165,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
@@ -228,11 +212,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
@@ -279,11 +259,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
@@ -330,11 +306,7 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP4]])
; CHECK-NEXT: ret float [[RESULT_0_LCSSA]]
@@ -381,11 +353,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]])
; CHECK-NEXT: ret float [[RESULT_0_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
index fc79227..029d8bd 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
@@ -34,28 +34,11 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]]
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[TMP13]] to i32
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[RES_010]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP10]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[RES_0_LCSSA]]
;
entry:
@@ -112,28 +95,11 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]]
-; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[TMP13]] to i32
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[RES_010]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP10]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[RES_0_LCSSA]]
;
entry:
@@ -183,25 +149,13 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP7]], [[R_07]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
@@ -245,26 +199,14 @@ define i32 @mul_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]])
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = mul nsw i32 [[TMP7]], [[R_07]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
@@ -308,26 +250,14 @@ define i32 @and_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP4]])
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ -1, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = and i32 [[TMP7]], [[R_07]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
@@ -371,26 +301,14 @@ define i32 @or_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP4]])
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = or i32 [[TMP7]], [[R_07]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
@@ -434,26 +352,14 @@ define i32 @xor_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]])
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = xor i32 [[TMP7]], [[R_07]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
@@ -497,26 +403,14 @@ define float @fadd_f32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select fast <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> [[TMP3]], <4 x float> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP4]])
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP7]], [[R_07]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ]
; CHECK-NEXT: ret float [[R_0_LCSSA]]
;
entry:
@@ -560,26 +454,14 @@ define float @fmul_f32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select fast <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> [[TMP3]], <4 x float> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]])
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1.000000e+00, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = fmul fast float [[TMP7]], [[R_07]]
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup.loopexit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 1.000000e+00, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 1.000000e+00, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ]
; CHECK-NEXT: ret float [[R_0_LCSSA]]
;
entry:
@@ -622,7 +504,7 @@ define i32 @smin_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
@@ -640,7 +522,7 @@ define i32 @smin_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
@@ -689,7 +571,7 @@ define i32 @smax_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
@@ -707,7 +589,7 @@ define i32 @smax_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
@@ -756,7 +638,7 @@ define i32 @umin_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
@@ -774,7 +656,7 @@ define i32 @umin_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
@@ -823,7 +705,7 @@ define i32 @umax_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
@@ -841,7 +723,7 @@ define i32 @umax_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
index 3426fb1..6ea075f 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
@@ -30,17 +30,6 @@ define void @always_vectorize(ptr %p, i32 %x) {
; DEFAULT-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]]
-; DEFAULT-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -59,17 +48,6 @@ define void @always_vectorize(ptr %p, i32 %x) {
; OPTSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; OPTSIZE: [[SCALAR_PH:.*]]:
-; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; OPTSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]]
-; OPTSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -88,17 +66,6 @@ define void @always_vectorize(ptr %p, i32 %x) {
; MINSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; MINSIZE: [[MIDDLE_BLOCK]]:
; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; MINSIZE: [[SCALAR_PH:.*]]:
-; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; MINSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]]
-; MINSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4
-; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; MINSIZE: [[FOR_COND_CLEANUP]]:
; MINSIZE-NEXT: ret void
;
@@ -386,23 +353,6 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8
-; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]]
-; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1
-; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]]
-; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]]
-; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP72]], 2
-; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]]
-; DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1
-; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -502,23 +452,6 @@ define void @dont_vectorize_with_minsize() {
; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -545,23 +478,6 @@ define void @dont_vectorize_with_minsize() {
; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; OPTSIZE: [[SCALAR_PH:.*]]:
-; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -588,23 +504,6 @@ define void @dont_vectorize_with_minsize() {
; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; MINSIZE: [[MIDDLE_BLOCK]]:
; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; MINSIZE: [[SCALAR_PH:.*]]:
-; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
; MINSIZE: [[FOR_COND_CLEANUP]]:
; MINSIZE-NEXT: ret void
;
@@ -659,23 +558,6 @@ define void @vectorization_forced() {
; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; DEFAULT: [[SCALAR_PH:.*]]:
-; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
-; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -702,23 +584,6 @@ define void @vectorization_forced() {
; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; OPTSIZE: [[MIDDLE_BLOCK]]:
; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; OPTSIZE: [[SCALAR_PH:.*]]:
-; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; OPTSIZE: [[FOR_COND_CLEANUP]]:
; OPTSIZE-NEXT: ret void
;
@@ -745,23 +610,6 @@ define void @vectorization_forced() {
; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; MINSIZE: [[MIDDLE_BLOCK]]:
; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; MINSIZE: [[SCALAR_PH:.*]]:
-; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
-; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]]
-; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
-; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16
-; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]]
-; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2
-; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; MINSIZE: [[FOR_COND_CLEANUP]]:
; MINSIZE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll b/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll
index 625f7a6..1ae71c8 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll
@@ -52,7 +52,7 @@ define dso_local void @predicate_loop_hint(ptr noalias nocapture %A, ptr noalias
; CHECK: %index.next = add nuw i64 %index, 4
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %vector.body, !llvm.loop [[VEC_LOOP2:![0-9]+]]
;
-; CHECK: br i1 %{{.*}}, label %{{.*}}, label %for.body, !llvm.loop [[SCALAR_LOOP2:![0-9]+]]
+; CHECK-NOT: br i1 %{{.*}}, label %{{.*}}, label %for.body, !llvm.loop
entry:
br label %for.body
@@ -78,9 +78,6 @@ for.body:
; CHECK-NEXT: [[MD_RT_UNROLL_DIS]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK-NEXT: [[SCALAR_LOOP1]] = distinct !{[[SCALAR_LOOP1]], [[MD_RT_UNROLL_DIS]], [[MD_IS_VEC]]}
; CHECK-NEXT: [[VEC_LOOP2]] = distinct !{[[VEC_LOOP2]], [[MD_IS_VEC]], [[MD_RT_UNROLL_DIS]]}
-; CHECK-NEXT: [[SCALAR_LOOP2]] = distinct !{[[SCALAR_LOOP2]], [[ORIG_PRED_ENABLED:!.+]], [[ORIG_VEC_ENABLED:!.+]]}
-; CHECK-NEXT: [[ORIG_PRED_ENABLED]] = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
-; CHECK-NEXT: [[ORIG_VEC_ENABLED]] = !{!"llvm.loop.vectorize.enable", i1 true}
!6 = distinct !{!6, !7, !8}
!7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
index 0b13343..7afa8ce9 100644
--- a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
+++ b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
@@ -33,18 +33,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) {
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
index a7f0206..024194d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
@@ -46,19 +46,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFBFMIN-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVFBFMIN: [[MIDDLE_BLOCK]]:
; ZVFBFMIN-NEXT: br label %[[EXIT:.*]]
-; ZVFBFMIN: [[SCALAR_PH:.*]]:
-; ZVFBFMIN-NEXT: br label %[[LOOP:.*]]
-; ZVFBFMIN: [[LOOP]]:
-; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
-; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
-; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2
-; ZVFBFMIN-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2
-; ZVFBFMIN-NEXT: [[Z:%.*]] = fadd bfloat [[X]], [[Y]]
-; ZVFBFMIN-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2
-; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; ZVFBFMIN: [[EXIT]]:
; ZVFBFMIN-NEXT: ret void
;
@@ -155,23 +142,6 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64
; ZVFBFMIN-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; ZVFBFMIN: [[MIDDLE_BLOCK]]:
; ZVFBFMIN-NEXT: br label %[[EXIT:.*]]
-; ZVFBFMIN: [[SCALAR_PH:.*]]:
-; ZVFBFMIN-NEXT: br label %[[LOOP:.*]]
-; ZVFBFMIN: [[LOOP]]:
-; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
-; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
-; ZVFBFMIN-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]]
-; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2
-; ZVFBFMIN-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2
-; ZVFBFMIN-NEXT: [[Z:%.*]] = load float, ptr [[C_GEP]], align 4
-; ZVFBFMIN-NEXT: [[X_EXT:%.*]] = fpext bfloat [[X]] to float
-; ZVFBFMIN-NEXT: [[Y_EXT:%.*]] = fpext bfloat [[Y]] to float
-; ZVFBFMIN-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X_EXT]], float [[Y_EXT]], float [[Z]])
-; ZVFBFMIN-NEXT: store float [[FMULADD]], ptr [[C_GEP]], align 4
-; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; ZVFBFMIN: [[EXIT]]:
; ZVFBFMIN-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index 612e7c0..2087218 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -33,24 +33,6 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
-; CHECK-NEXT: [[XOR]] = xor i16 0, 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[DEAD_GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
-; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -106,24 +88,6 @@ define void @block_with_dead_inst_2(ptr %src) #0 {
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
-; CHECK-NEXT: [[XOR]] = xor i16 0, 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: [[DEAD_GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -179,27 +143,6 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 {
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
-; CHECK-NEXT: [[XOR]] = xor i16 0, 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -262,29 +205,6 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
-; CHECK-NEXT: [[XOR]] = xor i16 0, 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: br label %[[THEN_1:.*]]
-; CHECK: [[THEN_1]]:
-; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -347,31 +267,6 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
-; CHECK-NEXT: [[XOR]] = xor i16 0, 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: br label %[[THEN_1:.*]]
-; CHECK: [[THEN_1]]:
-; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[ELSE_2:.*]]
-; CHECK: [[ELSE_2]]:
-; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -450,31 +345,6 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
-; CHECK-NEXT: [[XOR]] = xor i16 0, 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: br i1 [[IC]], label %[[THEN_1:.*]], label %[[ELSE]]
-; CHECK: [[THEN_1]]:
-; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[ELSE_2:.*]]
-; CHECK: [[ELSE_2]]:
-; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -537,24 +407,6 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
-; CHECK-NEXT: [[XOR]] = xor i32 0, 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[L]], %[[LOOP_HEADER]] ], [ 99, %[[THEN]] ]
-; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -607,24 +459,6 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 {
; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
-; CHECK-NEXT: [[XOR]] = xor i32 0, 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[L]], %[[LOOP_HEADER]] ], [ 99, %[[ELSE]] ]
-; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index 96c3a0d..10f8f74 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -42,16 +42,6 @@ define void @dead_load(ptr %p, i16 %start) {
; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[START_EXT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV]], 111
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -326,21 +316,6 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 false, label %[[LOOP_LATCH]], label %[[THEN:.*]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[NOT_A:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[NOT_A]], %[[THEN]] ], [ 0, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i32 [[P]], ptr [[GEP]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 9
-; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], 322
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -408,21 +383,6 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L_DEAD:%.*]] = load i8, ptr [[GEP_SRC_0]], align 1
-; CHECK-NEXT: [[IV_1:%.*]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_1]]
-; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr [[GEP_SRC_1]], align 1
-; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[L_1]] to i32
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i32 [[EXT]], ptr [[GEP_DST]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2
-; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
index b6230dc..3fd90b2 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
@@ -32,18 +32,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -86,21 +75,9 @@ define i64 @vector_add_reduce(ptr noalias nocapture %a) {
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]])
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
-; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i64 [[TMP11]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
index d20dd05..01b4502 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
@@ -29,18 +29,7 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -61,18 +50,7 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]]
-; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -113,20 +91,9 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -147,18 +114,7 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]]
-; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -199,20 +155,9 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -233,18 +178,7 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]]
-; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -285,20 +219,9 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -319,18 +242,7 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]]
-; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -379,26 +291,9 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0
-; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; CHECK: do_op:
-; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]]
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -422,24 +317,7 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
-; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0
-; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; FIXED: do_op:
-; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]]
-; FIXED-NEXT: br label [[LATCH]]
-; FIXED: latch:
-; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; FIXED-NEXT: br label [[LATCH:%.*]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -494,26 +372,9 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0
-; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; CHECK: do_op:
-; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]]
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -537,24 +398,7 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
-; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0
-; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; FIXED: do_op:
-; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]]
-; FIXED-NEXT: br label [[LATCH]]
-; FIXED: latch:
-; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; FIXED-NEXT: br label [[LATCH:%.*]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -601,26 +445,9 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42
-; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; CHECK: do_op:
-; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], 27
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -641,24 +468,7 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
-; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42
-; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; FIXED: do_op:
-; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], 27
-; FIXED-NEXT: br label [[LATCH]]
-; FIXED: latch:
-; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; FIXED-NEXT: br label [[LATCH:%.*]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -705,26 +515,9 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42
-; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; CHECK: do_op:
-; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], 27
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -745,24 +538,7 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
-; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42
-; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; FIXED: do_op:
-; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], 27
-; FIXED-NEXT: br label [[LATCH]]
-; FIXED: latch:
-; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; FIXED-NEXT: br label [[LATCH:%.*]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -815,26 +591,9 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128
-; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; CHECK: do_op:
-; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i8 [[ELEM]], -1
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[PHI:%.*]] = phi i8 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; CHECK-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -856,24 +615,7 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) {
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[FOR_END:%.*]]
-; FIXED: scalar.ph:
-; FIXED-NEXT: br label [[FOR_BODY:%.*]]
-; FIXED: for.body:
-; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]]
-; FIXED-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; FIXED-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128
-; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]]
-; FIXED: do_op:
-; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i8 [[ELEM]], -1
-; FIXED-NEXT: br label [[LATCH]]
-; FIXED: latch:
-; FIXED-NEXT: [[PHI:%.*]] = phi i8 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ]
-; FIXED-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1
-; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; FIXED-NEXT: br label [[LATCH:%.*]]
; FIXED: for.end:
; FIXED-NEXT: ret void
;
@@ -945,7 +687,7 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i8> [[VEC_IND]], [[BROADCAST_SPLAT7]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i32 [[TMP16]], 2
@@ -972,7 +714,7 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) {
; CHECK-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0
; CHECK-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[MERGE_LCSSA]]
@@ -1004,28 +746,9 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) {
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; FIXED: middle.block:
; FIXED-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
-; FIXED-NEXT: br label [[LOOP_HEADER:%.*]]
-; FIXED: loop.header:
-; FIXED-NEXT: [[IV:%.*]] = phi i16 [ -12, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; FIXED-NEXT: [[NARROW_IV:%.*]] = phi i8 [ -12, [[SCALAR_PH]] ], [ [[IV_NEXT_TRUNC:%.*]], [[LOOP_LATCH]] ]
-; FIXED-NEXT: br i1 [[C]], label [[LOOP_LATCH]], label [[THEN:%.*]]
-; FIXED: then:
-; FIXED-NEXT: [[UD:%.*]] = udiv i8 [[NARROW_IV]], [[X]]
-; FIXED-NEXT: [[UD_EXT:%.*]] = zext i8 [[UD]] to i16
-; FIXED-NEXT: [[SD:%.*]] = sdiv i16 [[UD_EXT]], [[Y]]
-; FIXED-NEXT: [[SD_EXT:%.*]] = sext i16 [[SD]] to i32
-; FIXED-NEXT: br label [[LOOP_LATCH]]
-; FIXED: loop.latch:
-; FIXED-NEXT: [[MERGE:%.*]] = phi i32 [ 0, [[LOOP_HEADER]] ], [ [[SD_EXT]], [[THEN]] ]
-; FIXED-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1
-; FIXED-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0
-; FIXED-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8
-; FIXED-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]]
+; FIXED-NEXT: br label [[LOOP_LATCH:%.*]]
; FIXED: exit:
-; FIXED-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
-; FIXED-NEXT: ret i32 [[MERGE_LCSSA]]
+; FIXED-NEXT: ret i32 [[TMP7]]
;
entry:
br label %loop.header
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
index 0a60556..21272cb 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -30,16 +30,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
-; CHECK-NEXT: store i64 [[IV1]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY1]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
@@ -84,18 +75,7 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[B]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
index a2ab7c4..143a51d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
@@ -46,19 +46,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFHMIN-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
-; ZVFHMIN: [[SCALAR_PH:.*]]:
-; ZVFHMIN-NEXT: br label %[[LOOP:.*]]
-; ZVFHMIN: [[LOOP]]:
-; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; ZVFHMIN-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
-; ZVFHMIN-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
-; ZVFHMIN-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
-; ZVFHMIN-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2
-; ZVFHMIN-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]]
-; ZVFHMIN-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
-; ZVFHMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; ZVFHMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
index 5df4f70..1c6954c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
@@ -116,17 +116,7 @@ define void @predicated_strided_store(ptr %start) {
; RVA23-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; RVA23-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; RVA23: middle.block:
-; RVA23-NEXT: br label [[EXIT:%.*]]
-; RVA23: scalar.ph:
; RVA23-NEXT: br label [[LOOP:%.*]]
-; RVA23: loop:
-; RVA23-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; RVA23-NEXT: [[TMP8:%.*]] = mul i64 [[IV]], 7
-; RVA23-NEXT: [[ADD_PTR:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]]
-; RVA23-NEXT: store i8 0, ptr [[ADD_PTR]], align 1
-; RVA23-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; RVA23-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 585
-; RVA23-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]]
; RVA23: exit:
; RVA23-NEXT: ret void
;
@@ -153,17 +143,7 @@ define void @predicated_strided_store(ptr %start) {
; RVA23ZVL1024B-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; RVA23ZVL1024B-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; RVA23ZVL1024B: middle.block:
-; RVA23ZVL1024B-NEXT: br label [[EXIT:%.*]]
-; RVA23ZVL1024B: scalar.ph:
; RVA23ZVL1024B-NEXT: br label [[LOOP:%.*]]
-; RVA23ZVL1024B: loop:
-; RVA23ZVL1024B-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; RVA23ZVL1024B-NEXT: [[TMP8:%.*]] = mul i64 [[IV]], 7
-; RVA23ZVL1024B-NEXT: [[ADD_PTR:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]]
-; RVA23ZVL1024B-NEXT: store i8 0, ptr [[ADD_PTR]], align 1
-; RVA23ZVL1024B-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; RVA23ZVL1024B-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 585
-; RVA23ZVL1024B-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]]
; RVA23ZVL1024B: exit:
; RVA23ZVL1024B-NEXT: ret void
;
@@ -216,21 +196,7 @@ define void @store_to_addr_generated_from_invariant_addr(ptr noalias %p0, ptr no
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr i32, ptr [[P1]], i64 [[IV]]
-; CHECK-NEXT: store ptr [[P0]], ptr [[ARRAYIDX11]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[P2]], align 4
-; CHECK-NEXT: [[BITS_TO_GO:%.*]] = getelementptr i8, ptr [[P3]], i64 [[TMP10]]
-; CHECK-NEXT: store i32 0, ptr [[BITS_TO_GO]], align 4
-; CHECK-NEXT: store i32 0, ptr [[BITS_TO_GO]], align 4
-; CHECK-NEXT: store i8 0, ptr [[BITS_TO_GO]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
index 4d97a65..4ccec2c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
@@ -153,21 +153,6 @@ define void @test_3_inductions(ptr noalias %dst, ptr noalias %src, i64 %n) #1 {
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV_0:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[IV_0_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV_OR:%.*]] = or i32 [[IV_2]], [[IV_0]]
-; CHECK-NEXT: [[IV_OR_EXT:%.*]] = sext i32 [[IV_OR]] to i64
-; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_OR_EXT]]
-; CHECK-NEXT: store ptr [[GEP_SRC]], ptr [[DST]], align 8
-; CHECK-NEXT: [[IV_0_NEXT]] = add i32 [[IV_0]], 2
-; CHECK-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1
-; CHECK-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 2
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_1]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -228,27 +213,6 @@ define void @redundant_iv_trunc_for_cse(ptr noalias %src, ptr noalias %dst, i64
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4
-; CHECK-NEXT: [[C_0:%.*]] = icmp eq i32 [[L]], 0
-; CHECK-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-NEXT: br i1 [[C_0]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[TRUNC_IV_2:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-NEXT: [[SHL_IV:%.*]] = shl i32 [[TRUNC_IV_2]], 16
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[SHL_IV]], %[[THEN]] ], [ [[TRUNC_IV]], %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[TRUNC_P:%.*]] = trunc i32 [[P]] to i8
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i8 [[TRUNC_P]], ptr [[GEP_DST]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index 63d1af38..7e6e45f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -133,24 +133,11 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; IF-EVL-OUTLOOP: scalar.ph:
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
-; IF-EVL-OUTLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32
-; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[R_07]], [[CONV]]
-; IF-EVL-OUTLOOP-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; IF-EVL-OUTLOOP-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; IF-EVL-OUTLOOP: for.cond.cleanup.loopexit:
-; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_COND_CLEANUP]]
; IF-EVL-OUTLOOP: for.cond.cleanup:
-; IF-EVL-OUTLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; IF-EVL-OUTLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP12]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: ret i32 [[R_0_LCSSA]]
;
; IF-EVL-INLOOP-LABEL: @add_i16_i32(
@@ -176,24 +163,11 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
; IF-EVL-INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
-; IF-EVL-INLOOP-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
-; IF-EVL-INLOOP: scalar.ph:
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]]
-; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
-; IF-EVL-INLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32
-; IF-EVL-INLOOP-NEXT: [[ADD]] = add nsw i32 [[R_07]], [[CONV]]
-; IF-EVL-INLOOP-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
-; IF-EVL-INLOOP-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
-; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]]
; IF-EVL-INLOOP: for.cond.cleanup.loopexit:
-; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_COND_CLEANUP]]
; IF-EVL-INLOOP: for.cond.cleanup:
-; IF-EVL-INLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; IF-EVL-INLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP11]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
@@ -330,22 +304,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP15]])
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-OUTLOOP: scalar.ph:
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-OUTLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]]
-; IF-EVL-OUTLOOP-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]]
-; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; IF-EVL-OUTLOOP: for.end:
-; IF-EVL-OUTLOOP-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-OUTLOOP-NEXT: ret i32 [[SMIN_LCSSA]]
+; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP18]]
;
; IF-EVL-INLOOP-LABEL: @smin(
; IF-EVL-INLOOP-NEXT: entry:
@@ -367,22 +328,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-INLOOP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
-; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-INLOOP: scalar.ph:
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-INLOOP-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-INLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP16]], [[RDX]]
-; IF-EVL-INLOOP-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP16]], i32 [[RDX]]
-; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; IF-EVL-INLOOP: for.end:
-; IF-EVL-INLOOP-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-INLOOP-NEXT: ret i32 [[SMIN_LCSSA]]
+; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_MINMAX]]
;
; IF-EVL-LABEL: @smin(
; IF-EVL-NEXT: entry:
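The hunks above all follow one pattern: with EVL tail folding the vector loop is known to cover every iteration, so the autogenerated CHECK lines drop the now-unreachable scalar.ph block and scalar remainder loop, and the exit phi (or the ret itself) consumes the vector reduction result directly. A minimal hand-written sketch of the resulting shape for the smin case follows — illustrative names, not the actual FileCheck variables, with the vector body elided:

declare i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32>)

; After the vector loop exits, middle.block reduces the accumulator and
; branches straight to the function exit; no scalar epilogue remains, so
; no LCSSA phi is needed to merge a scalar-loop result.
define i32 @smin_shape(<vscale x 4 x i32> %vec.acc) {
middle.block:
  %rdx = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %vec.acc)
  br label %for.end

for.end:
  ret i32 %rdx
}
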
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
index 43560d2..31c8b74 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
@@ -31,24 +31,7 @@ define void @load_store_factor2_i32(ptr %p) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; CHECK-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2
-; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -73,24 +56,7 @@ define void @load_store_factor2_i32(ptr %p) {
; FIXED-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; FIXED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; FIXED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; FIXED-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2
-; FIXED-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -121,24 +87,7 @@ define void @load_store_factor2_i32(ptr %p) {
; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; SCALABLE-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; SCALABLE-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; SCALABLE-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2
-; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
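For the interleaved-access tests the same scalar-epilogue cleanup applies: only the scalar reference loops disappear from the CHECK lines, while the vector body (elided in the context above) is unchanged. As a rough illustration of what that vector body computes for the factor-2 case, here is a hypothetical standalone sketch using the generic (de)interleave intrinsics — an assumption about the lowering, since the actual CHECK variables and instruction order are not shown in this diff:

declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32>)
declare <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)

; One vector step of load_store_factor2_i32: load a wide vector covering
; both interleaved fields, split it, add 1 to field 0 and 2 to field 1,
; then re-interleave and store back.
define void @factor2_shape(ptr %p) {
entry:
  %wide = load <vscale x 8 x i32>, ptr %p, align 4
  %dei = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %wide)
  %f0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %dei, 0
  %f1 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %dei, 1
  %f0.add = add <vscale x 4 x i32> %f0, splat (i32 1)
  %f1.add = add <vscale x 4 x i32> %f1, splat (i32 2)
  %rei = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %f0.add, <vscale x 4 x i32> %f1.add)
  store <vscale x 8 x i32> %rei, ptr %p, align 4
  ret void
}
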
@@ -194,24 +143,7 @@ define void @load_store_factor2_i64(ptr %p) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -236,24 +168,7 @@ define void @load_store_factor2_i64(ptr %p) {
; FIXED-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -284,24 +199,7 @@ define void @load_store_factor2_i64(ptr %p) {
; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -359,29 +257,7 @@ define void @load_store_factor3_i32(ptr %p) {
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; CHECK-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2
-; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
-; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; CHECK-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]]
-; CHECK-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4
-; CHECK-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3
-; CHECK-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -410,29 +286,7 @@ define void @load_store_factor3_i32(ptr %p) {
; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; FIXED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; FIXED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; FIXED-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2
-; FIXED-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
-; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; FIXED-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]]
-; FIXED-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4
-; FIXED-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3
-; FIXED-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -465,29 +319,7 @@ define void @load_store_factor3_i32(ptr %p) {
; SCALABLE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; SCALABLE-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; SCALABLE-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; SCALABLE-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2
-; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
-; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]]
-; SCALABLE-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4
-; SCALABLE-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3
-; SCALABLE-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -551,29 +383,7 @@ define void @load_store_factor3_i64(ptr %p) {
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -602,29 +412,7 @@ define void @load_store_factor3_i64(ptr %p) {
; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -657,29 +445,7 @@ define void @load_store_factor3_i64(ptr %p) {
; SCALABLE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -745,34 +511,7 @@ define void @load_store_factor4(ptr %p) {
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -803,34 +542,7 @@ define void @load_store_factor4(ptr %p) {
; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -865,34 +577,7 @@ define void @load_store_factor4(ptr %p) {
; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -966,39 +651,7 @@ define void @load_store_factor5(ptr %p) {
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1033,39 +686,7 @@ define void @load_store_factor5(ptr %p) {
; FIXED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -1102,39 +723,7 @@ define void @load_store_factor5(ptr %p) {
; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1216,44 +805,7 @@ define void @load_store_factor6(ptr %p) {
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1291,44 +843,7 @@ define void @load_store_factor6(ptr %p) {
; FIXED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; FIXED-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -1367,44 +882,7 @@ define void @load_store_factor6(ptr %p) {
; SCALABLE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1494,49 +972,7 @@ define void @load_store_factor7(ptr %p) {
; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; CHECK-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1
-; CHECK-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]]
-; CHECK-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8
-; CHECK-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7
-; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1578,49 +1014,7 @@ define void @load_store_factor7(ptr %p) {
; FIXED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; FIXED-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; FIXED-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1
-; FIXED-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]]
-; FIXED-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8
-; FIXED-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7
-; FIXED-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -1661,49 +1055,7 @@ define void @load_store_factor7(ptr %p) {
; SCALABLE-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; SCALABLE-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1
-; SCALABLE-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]]
-; SCALABLE-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8
-; SCALABLE-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7
-; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1801,54 +1153,7 @@ define void @load_store_factor8(ptr %p) {
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; CHECK-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1
-; CHECK-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]]
-; CHECK-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8
-; CHECK-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7
-; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
-; CHECK-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1
-; CHECK-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]]
-; CHECK-NEXT: [[X7:%.*]] = load i64, ptr [[Q7]], align 8
-; CHECK-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8
-; CHECK-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1891,54 +1196,7 @@ define void @load_store_factor8(ptr %p) {
; FIXED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; FIXED-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; FIXED-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1
-; FIXED-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]]
-; FIXED-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8
-; FIXED-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7
-; FIXED-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
-; FIXED-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1
-; FIXED-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]]
-; FIXED-NEXT: [[X7:%.*]] = load i64, ptr [[Q7]], align 8
-; FIXED-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8
-; FIXED-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -1981,54 +1239,7 @@ define void @load_store_factor8(ptr %p) {
; SCALABLE-NEXT: [[TMP25:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1
-; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2
-; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1
-; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]]
-; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3
-; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
-; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1
-; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]]
-; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4
-; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
-; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1
-; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]]
-; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8
-; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5
-; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
-; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1
-; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]]
-; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8
-; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6
-; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
-; SCALABLE-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1
-; SCALABLE-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]]
-; SCALABLE-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8
-; SCALABLE-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7
-; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
-; SCALABLE-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1
-; SCALABLE-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]]
-; SCALABLE-NEXT: [[X7:%.*]] = load i64, ptr [[Q7]], align 8
-; SCALABLE-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8
-; SCALABLE-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -2118,23 +1329,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; CHECK-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]]
-; CHECK-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]]
-; CHECK-NEXT: store i32 [[RES]], ptr [[DST]], align 4
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -2157,23 +1352,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; FIXED-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]]
-; FIXED-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]]
-; FIXED-NEXT: store i32 [[RES]], ptr [[DST]], align 4
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -2202,23 +1381,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4
-; SCALABLE-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]]
-; SCALABLE-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]]
-; SCALABLE-NEXT: store i32 [[RES]], ptr [[DST]], align 4
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -2273,23 +1436,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; CHECK-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]]
-; CHECK-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]]
-; CHECK-NEXT: store i64 [[RES]], ptr [[DST]], align 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -2312,23 +1459,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; FIXED: middle.block:
-; FIXED-NEXT: br label [[EXIT:%.*]]
-; FIXED: scalar.ph:
; FIXED-NEXT: br label [[LOOP:%.*]]
-; FIXED: loop:
-; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; FIXED-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]]
-; FIXED-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]]
-; FIXED-NEXT: store i64 [[RES]], ptr [[DST]], align 8
-; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; FIXED: exit:
; FIXED-NEXT: ret void
;
@@ -2357,23 +1488,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: br label [[EXIT:%.*]]
-; SCALABLE: scalar.ph:
; SCALABLE-NEXT: br label [[LOOP:%.*]]
-; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
-; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
-; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
-; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1
-; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]]
-; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8
-; SCALABLE-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]]
-; SCALABLE-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]]
-; SCALABLE-NEXT: store i64 [[RES]], ptr [[DST]], align 8
-; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
index a30aebb..ef0f0cf 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
@@ -96,7 +96,8 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; PREDICATED_DATA: middle.block:
; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_DATA: scalar.ph:
+; PREDICATED_DATA: for.end:
+; PREDICATED_DATA-NEXT: ret void
;
; PREDICATED_DATA-WITH-EVL-LABEL: define void @masked_strided_factor2
; PREDICATED_DATA-WITH-EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -135,9 +136,13 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.store.nxv32i8.p0(<vscale x 32 x i8> [[INTERLEAVED_VEC]], ptr align 1 [[TMP10]], <vscale x 32 x i1> [[INTERLEAVED_MASK4]], i32 [[INTERLEAVE_EVL3]])
; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP12:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
+; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; PREDICATED_DATA-WITH-EVL: middle.block:
; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_DATA-WITH-EVL: scalar.ph:
+; PREDICATED_DATA-WITH-EVL: for.end:
+; PREDICATED_DATA-WITH-EVL-NEXT: ret void
;
entry:
%conv = zext i8 %guard to i32
@@ -270,10 +275,11 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP1]]
; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; PREDICATED_DATA-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; PREDICATED_DATA-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; PREDICATED_DATA: middle.block:
; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_DATA: scalar.ph:
+; PREDICATED_DATA: for.end:
+; PREDICATED_DATA-NEXT: ret void
;
; PREDICATED_DATA-WITH-EVL-LABEL: define void @masked_strided_factor4
; PREDICATED_DATA-WITH-EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0]] {
@@ -316,9 +322,13 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.store.nxv64i8.p0(<vscale x 64 x i8> [[INTERLEAVED_VEC]], ptr align 1 [[TMP15]], <vscale x 64 x i1> [[INTERLEAVED_MASK4]], i32 [[INTERLEAVE_EVL3]])
; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP16:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
+; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; PREDICATED_DATA-WITH-EVL: middle.block:
; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_DATA-WITH-EVL: scalar.ph:
+; PREDICATED_DATA-WITH-EVL: for.end:
+; PREDICATED_DATA-WITH-EVL-NEXT: ret void
;
entry:
%conv = zext i8 %guard to i32
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
index cf2f78b..328ee16 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
@@ -62,18 +62,7 @@ define void @load_store(ptr %p) {
; LMUL2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; LMUL2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL2: middle.block:
-; LMUL2-NEXT: br label [[FOR_END:%.*]]
-; LMUL2: scalar.ph:
; LMUL2-NEXT: br label [[FOR_BODY:%.*]]
-; LMUL2: for.body:
-; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; LMUL2-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
-; LMUL2-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
-; LMUL2-NEXT: [[W:%.*]] = add i64 [[V]], 1
-; LMUL2-NEXT: store i64 [[W]], ptr [[Q]], align 8
-; LMUL2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; LMUL2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; LMUL2: for.end:
; LMUL2-NEXT: ret void
;
@@ -96,18 +85,7 @@ define void @load_store(ptr %p) {
; LMUL4-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; LMUL4-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL4: middle.block:
-; LMUL4-NEXT: br label [[FOR_END:%.*]]
-; LMUL4: scalar.ph:
; LMUL4-NEXT: br label [[FOR_BODY:%.*]]
-; LMUL4: for.body:
-; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; LMUL4-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
-; LMUL4-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
-; LMUL4-NEXT: [[W:%.*]] = add i64 [[V]], 1
-; LMUL4-NEXT: store i64 [[W]], ptr [[Q]], align 8
-; LMUL4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; LMUL4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; LMUL4: for.end:
; LMUL4-NEXT: ret void
;
@@ -130,18 +108,7 @@ define void @load_store(ptr %p) {
; LMUL8-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; LMUL8-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL8: middle.block:
-; LMUL8-NEXT: br label [[FOR_END:%.*]]
-; LMUL8: scalar.ph:
; LMUL8-NEXT: br label [[FOR_BODY:%.*]]
-; LMUL8: for.body:
-; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; LMUL8-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
-; LMUL8-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
-; LMUL8-NEXT: [[W:%.*]] = add i64 [[V]], 1
-; LMUL8-NEXT: store i64 [[W]], ptr [[Q]], align 8
-; LMUL8-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; LMUL8-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; LMUL8: for.end:
; LMUL8-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
index 53907fa..8ef53ca 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
@@ -133,21 +133,7 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: call void @llvm.vp.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP7]], ptr align 1 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP15]], 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP16]]
-; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 8
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -186,21 +172,7 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP8]]
-; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -240,21 +212,7 @@ define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP8]]
-; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 32
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -293,21 +251,7 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP7]], ptr align 1 [[DST]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP8]], 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP9]]
-; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 24
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
index ae6c90c..06b47aa 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
@@ -40,25 +40,7 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) {
; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; VLENUNK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VLENUNK: middle.block:
-; VLENUNK-NEXT: br label [[FOR_END:%.*]]
-; VLENUNK: scalar.ph:
-; VLENUNK-NEXT: br label [[FOR_BODY:%.*]]
-; VLENUNK: for.body:
-; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; VLENUNK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 512
-; VLENUNK-NEXT: br i1 [[ICMP]], label [[DO_LOAD:%.*]], label [[LATCH]]
-; VLENUNK: do_load:
-; VLENUNK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; VLENUNK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; VLENUNK-NEXT: br label [[LATCH]]
-; VLENUNK: latch:
-; VLENUNK-NEXT: [[PHI:%.*]] = phi i32 [ [[ELEM]], [[DO_LOAD]] ], [ 0, [[FOR_BODY]] ]
-; VLENUNK-NEXT: [[ADD:%.*]] = add i32 [[PHI]], [[V]]
-; VLENUNK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; VLENUNK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
-; VLENUNK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; VLENUNK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
+; VLENUNK-NEXT: br label [[LATCH:%.*]]
; VLENUNK: for.end:
; VLENUNK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
index e0bd8aa..0a9b1e0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
@@ -108,7 +108,8 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]]
; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]])
; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]]
-; FIXED-V: scalar.ph:
+; FIXED-V: for.exit:
+; FIXED-V-NEXT: ret i32 [[TMP15]]
;
; FIXED-ZVQDOTQ-LABEL: define i32 @vqdot(
; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -143,7 +144,8 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]])
; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]]
-; FIXED-ZVQDOTQ: scalar.ph:
+; FIXED-ZVQDOTQ: for.exit:
+; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]]
;
entry:
br label %for.body
@@ -263,12 +265,13 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]]
; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; FIXED-V: middle.block:
; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]]
; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]])
; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]]
-; FIXED-V: scalar.ph:
+; FIXED-V: for.exit:
+; FIXED-V-NEXT: ret i32 [[TMP15]]
;
; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotu(
; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -298,12 +301,13 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; FIXED-ZVQDOTQ: middle.block:
; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]])
; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]]
-; FIXED-ZVQDOTQ: scalar.ph:
+; FIXED-ZVQDOTQ: for.exit:
+; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]]
;
entry:
br label %for.body
@@ -423,12 +427,13 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]]
; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; FIXED-V: middle.block:
; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]]
; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]])
; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]]
-; FIXED-V: scalar.ph:
+; FIXED-V: for.exit:
+; FIXED-V-NEXT: ret i32 [[TMP15]]
;
; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotsu(
; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -458,12 +463,13 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; FIXED-ZVQDOTQ: middle.block:
; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]])
; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]]
-; FIXED-ZVQDOTQ: scalar.ph:
+; FIXED-ZVQDOTQ: for.exit:
+; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]]
;
entry:
br label %for.body
@@ -582,12 +588,13 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]]
; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; FIXED-V: middle.block:
; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]]
; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]])
; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]]
-; FIXED-V: scalar.ph:
+; FIXED-V: for.exit:
+; FIXED-V-NEXT: ret i32 [[TMP15]]
;
; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotsu2(
; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -617,12 +624,13 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; FIXED-ZVQDOTQ: middle.block:
; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]])
; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]]
-; FIXED-ZVQDOTQ: scalar.ph:
+; FIXED-ZVQDOTQ: for.exit:
+; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
index 782c2f6..65928f8 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
@@ -49,30 +49,7 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64
; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[C_1:%.*]] = icmp ule i64 [[IV]], [[A]]
-; CHECK-NEXT: br i1 [[C_1]], label [[THEN_1:%.*]], label [[ELSE_1:%.*]]
-; CHECK: then.1:
-; CHECK-NEXT: [[C_2:%.*]] = icmp ule i64 [[IV]], [[B]]
-; CHECK-NEXT: br i1 [[C_2]], label [[ELSE_1]], label [[MERGE:%.*]]
-; CHECK: else.1:
-; CHECK-NEXT: [[C_3:%.*]] = icmp ule i64 [[IV]], [[C]]
-; CHECK-NEXT: br i1 [[C_3]], label [[THEN_2:%.*]], label [[LOOP_LATCH]]
-; CHECK: then.2:
-; CHECK-NEXT: br label [[MERGE]]
-; CHECK: merge:
-; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ poison, [[THEN_1]] ], [ [[IV]], [[THEN_2]] ]
-; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[IDX]]
-; CHECK-NEXT: store i16 0, ptr [[GETELEMENTPTR]], align 2
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV]], 1000
-; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index 3739f85..8d4d282 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -37,27 +37,7 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT1:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_COND1:%.*]]
-; CHECK: for.cond:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH1:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ]
-; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
-; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52
-; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32
-; CHECK-NEXT: br i1 [[CMP_SLT]], label [[COND_FALSE:%.*]], label [[FOR_BODY]]
-; CHECK: cond.false:
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32
-; CHECK-NEXT: br label [[FOR_BODY]]
-; CHECK: for.body:
-; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], [[FOR_COND1]] ], [ [[ZEXT]], [[COND_FALSE]] ]
-; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8
-; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8
-; CHECK-NEXT: store i8 [[TRUNC]], ptr [[P]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV]], 8
-; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND1]], label [[EXIT1]]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
index 9b6bc68..735fb769 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
@@ -29,20 +29,8 @@ define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP8]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP10]], [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP11]]
;
entry:
br label %for.body
@@ -85,20 +73,8 @@ define i32 @sub(ptr %a, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP3]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 1024, %[[SCALAR_PH]] ], [ [[SUB:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-NEXT: [[SUB]] = sub i32 [[RDX]], [[X]]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[SUB_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUB_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
br label %loop
@@ -144,23 +120,8 @@ define i32 @addsub(ptr %a, ptr %b, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP5]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[SUB:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[GEP_A]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RDX]], [[X]]
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[Y:%.*]] = load i32, ptr [[GEP_B]], align 4
-; CHECK-NEXT: [[SUB]] = sub i32 [[ADD]], [[Y]]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[SUB_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[LOOP]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUB_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP8]]
;
entry:
br label %loop
@@ -209,20 +170,8 @@ define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP8]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[OR]] = or i32 [[TMP10]], [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[OR_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP11]]
;
entry:
br label %for.body
@@ -267,20 +216,8 @@ define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP8]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[AND]] = and i32 [[TMP10]], [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[AND_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP11]]
;
entry:
br label %for.body
@@ -325,20 +262,8 @@ define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP8]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[XOR]] = xor i32 [[TMP10]], [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP11]]
;
entry:
br label %for.body
@@ -384,21 +309,8 @@ define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[SUM_010]]
-; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP12]]
;
entry:
br label %for.body
@@ -445,21 +357,8 @@ define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP11]], [[SUM_010]]
-; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP12]]
;
entry:
br label %for.body
@@ -505,20 +404,8 @@ define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP8]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP10]], [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[ADD_LCSSA]]
+; CHECK-NEXT: ret float [[TMP11]]
;
entry:
br label %for.body
@@ -561,20 +448,8 @@ define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "tar
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP11:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP8]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP10]], [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret half [[ADD_LCSSA]]
+; CHECK-NEXT: ret half [[TMP11]]
;
entry:
br label %for.body
@@ -744,21 +619,8 @@ define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt float [[TMP11]], [[SUM_07]]
-; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
+; CHECK-NEXT: ret float [[TMP12]]
;
entry:
br label %for.body
@@ -803,21 +665,8 @@ define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt half [[TMP11]], [[SUM_07]]
-; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
+; CHECK-NEXT: ret half [[TMP12]]
;
entry:
br label %for.body
@@ -862,21 +711,8 @@ define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt bfloat [[TMP11]], [[SUM_07]]
-; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
+; CHECK-NEXT: ret bfloat [[TMP12]]
;
entry:
br label %for.body
@@ -923,21 +759,8 @@ define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt float [[TMP11]], [[SUM_07]]
-; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
+; CHECK-NEXT: ret float [[TMP12]]
;
entry:
br label %for.body
@@ -982,21 +805,8 @@ define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt half [[TMP11]], [[SUM_07]]
-; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
+; CHECK-NEXT: ret half [[TMP12]]
;
entry:
br label %for.body
@@ -1041,21 +851,8 @@ define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt bfloat [[TMP11]], [[SUM_07]]
-; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
+; CHECK-NEXT: ret bfloat [[TMP12]]
;
entry:
br label %for.body
@@ -1243,22 +1040,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP16:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]])
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[MULADD_LCSSA]]
+; CHECK-NEXT: ret float [[TMP16]]
;
entry:
br label %for.body
@@ -1305,22 +1088,8 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh"
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP16:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP9]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[TMP12:%.*]] = load half, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP11]], half [[TMP12]], half [[SUM_07]])
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret half [[MULADD_LCSSA]]
+; CHECK-NEXT: ret half [[TMP16]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll
index 93c0a74..850a6cb 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll
@@ -58,36 +58,6 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) {
; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[WIDE_IV_0:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_0_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[WIDE_IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_1_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[WIDE_IV_2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_2_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[WIDE_IV_0_SUB:%.*]] = sub i64 [[WIDE_IV_0]], 1
-; CHECK-NEXT: [[A_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_0_SUB]]
-; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[A_GEP0]], align 1
-; CHECK-NEXT: [[WIDE_IV_1_SUB:%.*]] = sub i64 [[WIDE_IV_1]], 1
-; CHECK-NEXT: [[B_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_1_SUB]]
-; CHECK-NEXT: [[B:%.*]] = load i8, ptr [[B_GEP0]], align 1
-; CHECK-NEXT: [[WIDE_IV_2_SUB:%.*]] = sub i64 [[WIDE_IV_2]], 1
-; CHECK-NEXT: [[C_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_2_SUB]]
-; CHECK-NEXT: [[C:%.*]] = load i8, ptr [[C_GEP0]], align 1
-; CHECK-NEXT: [[IV_MUL:%.*]] = mul i64 [[IV]], 3
-; CHECK-NEXT: [[BASE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IV_MUL]]
-; CHECK-NEXT: [[A_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 0
-; CHECK-NEXT: store i8 [[A]], ptr [[A_GEP1]], align 1
-; CHECK-NEXT: [[B_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 1
-; CHECK-NEXT: store i8 [[B]], ptr [[B_GEP1]], align 1
-; CHECK-NEXT: [[C_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 2
-; CHECK-NEXT: store i8 [[C]], ptr [[C_GEP1]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[WIDE_IV_0_NEXT]] = add i64 [[WIDE_IV_0]], 2
-; CHECK-NEXT: [[WIDE_IV_1_NEXT]] = add i64 [[WIDE_IV_1]], 3
-; CHECK-NEXT: [[WIDE_IV_2_NEXT]] = add i64 [[WIDE_IV_2]], 4
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -145,36 +115,6 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) {
; NO-REG-PRESSURE-CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-REG-PRESSURE-CHECK: [[MIDDLE_BLOCK]]:
; NO-REG-PRESSURE-CHECK-NEXT: br label %[[EXIT:.*]]
-; NO-REG-PRESSURE-CHECK: [[SCALAR_PH:.*]]:
-; NO-REG-PRESSURE-CHECK-NEXT: br label %[[LOOP:.*]]
-; NO-REG-PRESSURE-CHECK: [[LOOP]]:
-; NO-REG-PRESSURE-CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_0_NEXT:%.*]], %[[LOOP]] ]
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_1_NEXT:%.*]], %[[LOOP]] ]
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_2_NEXT:%.*]], %[[LOOP]] ]
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0_SUB:%.*]] = sub i64 [[WIDE_IV_0]], 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[A_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_0_SUB]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[A:%.*]] = load i8, ptr [[A_GEP0]], align 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1_SUB:%.*]] = sub i64 [[WIDE_IV_1]], 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[B_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_1_SUB]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[B:%.*]] = load i8, ptr [[B_GEP0]], align 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2_SUB:%.*]] = sub i64 [[WIDE_IV_2]], 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[C_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_2_SUB]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[C:%.*]] = load i8, ptr [[C_GEP0]], align 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[IV_MUL:%.*]] = mul i64 [[IV]], 3
-; NO-REG-PRESSURE-CHECK-NEXT: [[BASE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IV_MUL]]
-; NO-REG-PRESSURE-CHECK-NEXT: [[A_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 0
-; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[A]], ptr [[A_GEP1]], align 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[B_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 1
-; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[B]], ptr [[B_GEP1]], align 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[C_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 2
-; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[C]], ptr [[C_GEP1]], align 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0_NEXT]] = add i64 [[WIDE_IV_0]], 2
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1_NEXT]] = add i64 [[WIDE_IV_1]], 3
-; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2_NEXT]] = add i64 [[WIDE_IV_2]], 4
-; NO-REG-PRESSURE-CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV]], 1024
-; NO-REG-PRESSURE-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; NO-REG-PRESSURE-CHECK: [[EXIT]]:
; NO-REG-PRESSURE-CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
index 7b8404a..b80368d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
@@ -21,18 +21,8 @@ define float @s311(float %a_0, float %s311_sum) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ [[S311_SUM]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED_NEXT]] = fadd float [[A_0]], [[RED]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1200
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[RED_LCSSA]]
+; CHECK-NEXT: ret float [[TMP6]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index a165dde..5ca9bfd 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -53,10 +53,9 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: br [[EXIT:label %.*]]
-; RV64: [[SCALAR_PH:.*:]]
-; RV64-NEXT: br label %[[FOR_BODY:.*]]
-; RV64: [[FOR_BODY]]:
+; RV64-NEXT: br label %[[EXIT:.*]]
+; RV64: [[EXIT]]:
+; RV64-NEXT: ret void
;
; RV32-LABEL: define void @vector_reverse_i32(
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -93,10 +92,9 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: br [[EXIT:label %.*]]
-; RV32: [[SCALAR_PH:.*:]]
-; RV32-NEXT: br label %[[FOR_BODY:.*]]
-; RV32: [[FOR_BODY]]:
+; RV32-NEXT: br label %[[EXIT:.*]]
+; RV32: [[EXIT]]:
+; RV32-NEXT: ret void
;
; RV64-UF2-LABEL: define void @vector_reverse_i32(
; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -718,10 +716,9 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) {
; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: br [[EXIT:label %.*]]
-; RV64: [[SCALAR_PH:.*:]]
-; RV64-NEXT: br label %[[FOR_BODY:.*]]
-; RV64: [[FOR_BODY]]:
+; RV64-NEXT: br label %[[EXIT:.*]]
+; RV64: [[EXIT]]:
+; RV64-NEXT: ret void
;
; RV32-LABEL: define void @vector_reverse_f32_simplify(
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
@@ -758,10 +755,9 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) {
; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: br [[EXIT:label %.*]]
-; RV32: [[SCALAR_PH:.*:]]
-; RV32-NEXT: br label %[[FOR_BODY:.*]]
-; RV32: [[FOR_BODY]]:
+; RV32-NEXT: br label %[[EXIT:.*]]
+; RV32: [[EXIT]]:
+; RV32-NEXT: ret void
;
; RV64-UF2-LABEL: define void @vector_reverse_f32_simplify(
; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
index ecde164..e046816 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
@@ -28,19 +28,7 @@ define void @test(ptr %p) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200
-; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -81,19 +69,7 @@ define void @test_may_clobber(ptr %p) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100
-; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -137,19 +113,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192
-; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -193,19 +157,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024
-; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
index 544ddc5..7330ce6 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
@@ -27,18 +27,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -84,18 +73,7 @@ define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) {
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ELEM]], [[V]]
-; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -179,18 +157,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
-; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
-; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -235,23 +202,9 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]])
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
-; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[AADDR]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
-; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i64 [[TMP11]]
;
entry:
br label %for.body
@@ -292,16 +245,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -340,16 +284,7 @@ define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store ptr [[V]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index a596c63..3c90908 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -28,18 +28,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -84,18 +73,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
-; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
-; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -140,23 +118,9 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP11]])
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
-; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[AADDR]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
-; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i64 [[TMP14]]
;
entry:
br label %for.body
@@ -197,16 +161,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -246,17 +201,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -356,18 +301,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
-; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
index 4bfe9a4..8971b0c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
@@ -29,21 +29,8 @@ define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP12]], [[X]]
-; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[COND_LCSSA]]
+; CHECK-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
@@ -91,21 +78,8 @@ define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[TMP12]], [[X]]
-; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[COND_LCSSA]]
+; CHECK-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
@@ -151,21 +125,8 @@ define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 7, i32 3
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 3, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
-; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
-; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 7
-; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
-; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[DOTLCSSA]]
+; CHECK-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
@@ -211,21 +172,8 @@ define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[B]], i32 [[A]]
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[A]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
-; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
-; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 [[B]]
-; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
-; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[DOTLCSSA]]
+; CHECK-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
@@ -271,21 +219,8 @@ define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 1, i32 2
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4
-; CHECK-NEXT: [[TMP16:%.*]] = fcmp fast ueq float [[TMP15]], 3.000000e+00
-; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 1
-; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
-; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[DOTLCSSA]]
+; CHECK-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
@@ -373,29 +308,8 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1
; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 1, i32 0
; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[I_013]]
-; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 35
-; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[FOR_INC]]
-; CHECK: [[IF_THEN]]:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[I_013]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP15]], 2
-; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[CMP3]], i32 1, i32 [[R_012]]
-; CHECK-NEXT: br label %[[FOR_INC]]
-; CHECK: [[FOR_INC]]:
-; CHECK-NEXT: [[R_1]] = phi i32 [ [[R_012]], %[[FOR_BODY]] ], [ [[SPEC_SELECT]], %[[IF_THEN]] ]
-; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]]
; CHECK: [[FOR_END_LOOPEXIT]]:
-; CHECK-NEXT: [[R_1_LCSSA:%.*]] = phi i32 [ [[R_1]], %[[FOR_INC]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[R_1_LCSSA]]
+; CHECK-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index ca1c710..2fbc73e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -31,19 +31,7 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], 8
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -147,20 +135,7 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
-; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
-; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
-; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], 64
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -264,19 +239,7 @@ define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[P]], [[SCALAR_PH1]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
-; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
-; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4
-; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 8
-; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1357,18 +1320,7 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; NOSTRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; NOSTRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
-; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
-; NOSTRIDED: loop:
-; NOSTRIDED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; NOSTRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[IN]], i64 [[IV]]
-; NOSTRIDED-NEXT: [[TMP8:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; NOSTRIDED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT]], i64 [[IV]]
-; NOSTRIDED-NEXT: store i64 [[TMP8]], ptr [[ARRAYIDX2]], align 8
-; NOSTRIDED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
@@ -1452,18 +1404,7 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) {
; STRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; STRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; STRIDED: middle.block:
-; STRIDED-NEXT: br label [[EXIT:%.*]]
-; STRIDED: scalar.ph:
; STRIDED-NEXT: br label [[LOOP:%.*]]
-; STRIDED: loop:
-; STRIDED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; STRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[IN]], i64 [[IV]]
-; STRIDED-NEXT: [[TMP8:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; STRIDED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT]], i64 [[IV]]
-; STRIDED-NEXT: store i64 [[TMP8]], ptr [[ARRAYIDX2]], align 8
-; STRIDED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index 6652fef..8ab0f6f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -1206,17 +1206,6 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[LOOP:.*]]
-; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64
-; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: store i64 [[TMP0]], ptr [[GEP2]], align 8
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index 61f97aa..34a8275 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -43,23 +43,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP20]])
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-OUTLOOP: scalar.ph:
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP27]], 3
-; IF-EVL-OUTLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP27]], i32 0
-; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]]
-; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-OUTLOOP: for.end:
-; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP24]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-OUTLOOP-NEXT: ret i32 [[ADD_LCSSA]]
+; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP24]]
;
; IF-EVL-INLOOP-LABEL: define i32 @cond_add(
; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -84,23 +70,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
-; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-INLOOP: scalar.ph:
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3
-; IF-EVL-INLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP25]], i32 0
-; IF-EVL-INLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]]
-; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-INLOOP: for.end:
-; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-INLOOP-NEXT: ret i32 [[ADD_LCSSA]]
+; IF-EVL-INLOOP-NEXT: ret i32 [[TMP22]]
;
; NO-VP-OUTLOOP-LABEL: define i32 @cond_add(
; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -239,30 +211,12 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP23]]
; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PREDPHI]])
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-OUTLOOP: scalar.ph:
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
-; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 3
-; IF-EVL-OUTLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; IF-EVL-OUTLOOP: if.then:
-; IF-EVL-OUTLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[RDX]], [[TMP28]]
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_INC]]
-; IF-EVL-OUTLOOP: for.inc:
-; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[RDX]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]]
+; IF-EVL-OUTLOOP-NEXT: br label [[FOR_INC:%.*]]
; IF-EVL-OUTLOOP: for.end:
-; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[FOR_INC]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-OUTLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]]
+; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP27]]
;
; IF-EVL-INLOOP-LABEL: define i32 @cond_add_pred(
; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] {
@@ -284,29 +238,11 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]]
; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
-; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-INLOOP: scalar.ph:
-; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
-; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3
-; IF-EVL-INLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; IF-EVL-INLOOP: if.then:
-; IF-EVL-INLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[RDX]], [[TMP25]]
-; IF-EVL-INLOOP-NEXT: br label [[FOR_INC]]
-; IF-EVL-INLOOP: for.inc:
-; IF-EVL-INLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[RDX]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]]
+; IF-EVL-INLOOP-NEXT: br label [[FOR_INC:%.*]]
; IF-EVL-INLOOP: for.end:
-; IF-EVL-INLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[FOR_INC]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]]
+; IF-EVL-INLOOP-NEXT: ret i32 [[TMP22]]
;
; NO-VP-OUTLOOP-LABEL: define i32 @cond_add_pred(
; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] {
@@ -466,27 +402,12 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-OUTLOOP: scalar.ph:
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP37:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
-; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP37]], [[IV_TRUNC]]
-; IF-EVL-OUTLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP37]], i32 0
-; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]]
-; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]]
; IF-EVL-OUTLOOP: for.end:
-; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-OUTLOOP-NEXT: ret i32 [[ADD_LCSSA]]
+; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP22]]
;
; IF-EVL-INLOOP-LABEL: define i32 @step_cond_add(
; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] {
@@ -516,26 +437,11 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
-; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-INLOOP: scalar.ph:
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-INLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
-; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], [[IV_TRUNC]]
-; IF-EVL-INLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP28]], i32 0
-; IF-EVL-INLOOP-NEXT: [[ADD1]] = add nsw i32 [[SELECT]], [[RDX1]]
-; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]]
; IF-EVL-INLOOP: for.end:
-; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD1]], [[FOR_BODY]] ], [ [[ADD]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-INLOOP-NEXT: ret i32 [[ADD_LCSSA]]
+; IF-EVL-INLOOP-NEXT: ret i32 [[ADD]]
;
; NO-VP-OUTLOOP-LABEL: define i32 @step_cond_add(
; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] {
@@ -700,31 +606,12 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT7]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT2]]
; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP24]])
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-OUTLOOP: scalar.ph:
-; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
-; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
-; IF-EVL-OUTLOOP-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
-; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32
-; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP38]], [[IV_TRUNC]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[MIDDLE_BLOCK]]
-; IF-EVL-OUTLOOP: if.then:
-; IF-EVL-OUTLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[BC_MERGE_RDX]], [[TMP38]]
-; IF-EVL-OUTLOOP-NEXT: br label [[MIDDLE_BLOCK]]
-; IF-EVL-OUTLOOP: for.inc:
-; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[BC_MERGE_RDX]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
-; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]]
+; IF-EVL-OUTLOOP-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; IF-EVL-OUTLOOP: for.end:
-; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[MIDDLE_BLOCK]] ], [ [[TMP27]], [[MIDDLE_BLOCK1]] ]
-; IF-EVL-OUTLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]]
+; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP27]]
;
; IF-EVL-INLOOP-LABEL: define i32 @step_cond_add_pred(
; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] {
@@ -753,30 +640,11 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-INLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-INLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
-; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL-INLOOP: scalar.ph:
-; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
-; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-INLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
-; IF-EVL-INLOOP-NEXT: [[TMP35:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
-; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32
-; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP35]], [[IV_TRUNC]]
-; IF-EVL-INLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[MIDDLE_BLOCK]]
-; IF-EVL-INLOOP: if.then:
-; IF-EVL-INLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[BC_MERGE_RDX]], [[TMP35]]
-; IF-EVL-INLOOP-NEXT: br label [[MIDDLE_BLOCK]]
-; IF-EVL-INLOOP: for.inc:
-; IF-EVL-INLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[BC_MERGE_RDX]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
-; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]]
+; IF-EVL-INLOOP-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; IF-EVL-INLOOP: for.end:
-; IF-EVL-INLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[MIDDLE_BLOCK]] ], [ [[TMP17]], [[MIDDLE_BLOCK1]] ]
-; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]]
+; IF-EVL-INLOOP-NEXT: ret i32 [[TMP17]]
;
; NO-VP-OUTLOOP-LABEL: define i32 @step_cond_add_pred(
; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] {
@@ -931,20 +799,16 @@ for.end:
; IF-EVL-OUTLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; IF-EVL-OUTLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; IF-EVL-OUTLOOP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; IF-EVL-OUTLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
-; IF-EVL-OUTLOOP: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true}
+; IF-EVL-OUTLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
+; IF-EVL-OUTLOOP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; IF-EVL-OUTLOOP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
-; IF-EVL-OUTLOOP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; IF-EVL-OUTLOOP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
;.
; IF-EVL-INLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; IF-EVL-INLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; IF-EVL-INLOOP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; IF-EVL-INLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
-; IF-EVL-INLOOP: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true}
+; IF-EVL-INLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
+; IF-EVL-INLOOP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; IF-EVL-INLOOP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
-; IF-EVL-INLOOP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; IF-EVL-INLOOP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
;.
; NO-VP-OUTLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; NO-VP-OUTLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
index 22d216e..8cd540c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
@@ -33,20 +33,6 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[LOOP:.*]]
-; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
-; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
-; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8
-; IF-EVL-NEXT: [[TMP18:%.*]] = sdiv i64 [[TMP16]], [[TMP17]]
-; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]]
-; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
@@ -143,20 +129,6 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[LOOP:.*]]
-; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
-; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
-; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8
-; IF-EVL-NEXT: [[TMP18:%.*]] = udiv i64 [[TMP16]], [[TMP17]]
-; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]]
-; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
@@ -252,20 +224,6 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[LOOP:.*]]
-; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
-; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
-; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8
-; IF-EVL-NEXT: [[TMP18:%.*]] = srem i64 [[TMP16]], [[TMP17]]
-; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]]
-; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
@@ -361,20 +319,6 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[EXIT:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[LOOP:.*]]
-; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
-; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
-; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8
-; IF-EVL-NEXT: [[TMP18:%.*]] = urem i64 [[TMP16]], [[TMP17]]
-; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]]
-; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; IF-EVL: [[EXIT]]:
; IF-EVL-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
index b153328..c7ba826 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
@@ -42,19 +42,6 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FOR_END:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
-; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
-; IF-EVL-NEXT: [[TMP24]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP24]]
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]]
-; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL: [[FOR_END]]:
; IF-EVL-NEXT: ret void
;
@@ -167,23 +154,9 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FOR_END:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
-; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
-; IF-EVL-NEXT: [[TMP31]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[FOR2]]
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]]
-; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]]
; IF-EVL: [[FOR_END]]:
; IF-EVL-NEXT: ret void
;
@@ -316,25 +289,9 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP27]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]]
; IF-EVL-NEXT: [[TMP26:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FOR_END:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
-; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ 11, %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
-; IF-EVL-NEXT: [[TMP38]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR2]], [[FOR3]]
-; IF-EVL-NEXT: [[ADD1:%.*]] = add i32 [[ADD]], [[FOR1]]
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]]
-; IF-EVL-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]]
; IF-EVL: [[FOR_END]]:
; IF-EVL-NEXT: ret void
;
@@ -469,7 +426,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP12]], align 4
; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP3]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; IF-EVL-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4
@@ -495,7 +452,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; IF-EVL: [[FOR_END]]:
; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret i32 [[FOR1_LCSSA]]
@@ -613,20 +570,9 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
; IF-EVL-NEXT: br label %[[FOR_END:.*]]
-; IF-EVL: [[SCALAR_PH:.*]]:
-; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
-; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ 33, %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP14]] = add i64 [[IV1]], 42
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[IV1]]
-; IF-EVL-NEXT: store i64 [[FOR1]], ptr [[ARRAYIDX]], align 8
-; IF-EVL-NEXT: [[IV1_NEXT]] = add nuw nsw i64 [[IV1]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV1_NEXT]], [[TC]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]]
; IF-EVL: [[FOR_END]]:
; IF-EVL-NEXT: ret void
;
@@ -713,13 +659,11 @@ for.end:
; IF-EVL: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; IF-EVL: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; IF-EVL: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; IF-EVL: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
-; IF-EVL: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true}
+; IF-EVL: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
+; IF-EVL: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; IF-EVL: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
-; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]}
; IF-EVL: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
-; IF-EVL: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
-; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
;.
; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
index df550ec..b9a4e97 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
@@ -30,21 +30,9 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ADD_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP15]]
;
; NO-VP-LABEL: @add(
; NO-VP-NEXT: entry:
@@ -129,7 +117,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP5]] = mul i32 [[VEC_PHI1]], [[TMP4]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP5]], [[MUL]]
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
@@ -146,7 +134,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[MUL1]] = mul nsw i32 [[TMP0]], [[RDX1]]
; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP6:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]]
@@ -231,23 +219,11 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[OR_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP15]]
;
; NO-VP-LABEL: @or(
; NO-VP-NEXT: entry:
@@ -327,23 +303,11 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[AND_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP15]]
;
; NO-VP-LABEL: @and(
; NO-VP-NEXT: entry:
@@ -423,23 +387,11 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[XOR_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP15]]
;
; NO-VP-LABEL: @xor(
; NO-VP-NEXT: entry:
@@ -519,24 +471,11 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP17]], [[RDX]]
-; IF-EVL-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[SMIN_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]]
;
; NO-VP-LABEL: @smin(
; NO-VP-NEXT: entry:
@@ -618,24 +557,11 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP17]], [[RDX]]
-; IF-EVL-NEXT: [[SMAX]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[SMAX_LCSSA:%.*]] = phi i32 [ [[SMAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[SMAX_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]]
;
; NO-VP-LABEL: @smax(
; NO-VP-NEXT: entry:
@@ -717,24 +643,11 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP17]], [[RDX]]
-; IF-EVL-NEXT: [[UMIN]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[UMIN_LCSSA:%.*]] = phi i32 [ [[UMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[UMIN_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]]
;
; NO-VP-LABEL: @umin(
; NO-VP-NEXT: entry:
@@ -816,24 +729,11 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP17]], [[RDX]]
-; IF-EVL-NEXT: [[UMAX]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[UMAX_LCSSA:%.*]] = phi i32 [ [[UMAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[UMAX_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]]
;
; NO-VP-LABEL: @umax(
; NO-VP-NEXT: entry:
@@ -915,23 +815,11 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[ADD_LCSSA]]
+; IF-EVL-NEXT: ret float [[TMP15]]
;
; NO-VP-LABEL: @fadd(
; NO-VP-NEXT: entry:
@@ -1016,7 +904,7 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP5]] = fmul reassoc float [[VEC_PHI1]], [[TMP4]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP5]], [[MUL]]
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
@@ -1033,7 +921,7 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[MUL1]] = fmul reassoc float [[TMP0]], [[RDX1]]
; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP24:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret float [[MUL_LCSSA]]
@@ -1119,24 +1007,11 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP17]], [[RDX]]
-; IF-EVL-NEXT: [[MIN]] = select i1 [[CMP]], float [[TMP17]], float [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[MIN_LCSSA]]
+; IF-EVL-NEXT: ret float [[RDX_MINMAX_SELECT]]
;
; NO-VP-LABEL: @fmin(
; NO-VP-NEXT: entry:
@@ -1220,24 +1095,11 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP17]], [[RDX]]
-; IF-EVL-NEXT: [[MAX]] = select i1 [[CMP]], float [[TMP17]], float [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[MAX_LCSSA]]
+; IF-EVL-NEXT: ret float [[RDX_MINMAX_SELECT]]
;
; NO-VP-LABEL: @fmax(
; NO-VP-NEXT: entry:
@@ -1324,7 +1186,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]])
@@ -1342,7 +1204,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]])
; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP30:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret float [[MIN_LCSSA]]
@@ -1432,7 +1294,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]])
@@ -1450,7 +1312,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]])
; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP32:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret float [[MAX_LCSSA]]
@@ -1539,25 +1401,11 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[RDX]])
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[MULADD_LCSSA]]
+; IF-EVL-NEXT: ret float [[TMP18]]
;
; NO-VP-LABEL: @fmuladd(
; NO-VP-NEXT: entry:
@@ -1644,27 +1492,14 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]])
; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]]
; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP20]], i32 [[INV:%.*]], i32 [[START:%.*]]
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP21]], 3
-; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[RDX_SELECT]]
;
; NO-VP-LABEL: @anyof_icmp(
; NO-VP-NEXT: entry:
@@ -1749,27 +1584,14 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]])
; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]]
; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP20]], i32 [[INV:%.*]], i32 [[START:%.*]]
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP21]], 3.000000e+00
-; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[RDX_SELECT]]
;
; NO-VP-LABEL: @anyof_fcmp(
; NO-VP-NEXT: entry:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 7c05f46..0c22a9e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -32,21 +32,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 0
-; IF-EVL-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 1
-; IF-EVL-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP12]]
-; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: for.cond.cleanup:
; IF-EVL-NEXT: ret void
;
@@ -156,30 +142,12 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP12]])
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0
-; IF-EVL-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP16]]
-; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 1
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP17]]
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 3
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[ADD2]] = add nsw i32 [[ADD1]], [[TMP18]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]]
; IF-EVL: exit:
-; IF-EVL-NEXT: [[ADD2_LCSSA:%.*]] = phi i32 [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ADD2_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP15]]
;
; NO-VP-LABEL: @load_factor_4_with_gap(
; NO-VP-NEXT: entry:
@@ -299,22 +267,9 @@ define void @store_factor_4_with_gap(i32 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]]
; IF-EVL-NEXT: [[VEC_IND_NEXT5]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[TMP15:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 0
-; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 1
-; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX1]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 3
-; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[TMP15]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
@@ -427,30 +382,12 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP12]])
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0
-; IF-EVL-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP16]]
-; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 1
-; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP17]]
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 2
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[ADD2]] = add nsw i32 [[ADD1]], [[TMP18]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]]
; IF-EVL: exit:
-; IF-EVL-NEXT: [[ADD2_LCSSA:%.*]] = phi i32 [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ADD2_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP15]]
;
; NO-VP-LABEL: @load_factor_4_with_tail_gap(
; NO-VP-NEXT: entry:
@@ -571,22 +508,9 @@ define void @store_factor_4_with_tail_gap(i32 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]]
; IF-EVL-NEXT: [[VEC_IND_NEXT5]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[TMP15:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 0
-; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 1
-; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX1]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 2
-; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[TMP15]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
@@ -697,33 +621,12 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[N]], [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD3:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0
-; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP20]]
-; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 1
-; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP21]]
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 2
-; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[ADD2:%.*]] = add nsw i32 [[ADD1]], [[TMP22]]
-; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 3
-; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
-; IF-EVL-NEXT: [[ADD3]] = add nsw i32 [[ADD2]], [[TMP23]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[IV_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[EXIT]]
; IF-EVL: exit:
-; IF-EVL-NEXT: [[ADD3_LCSSA:%.*]] = phi i32 [ [[ADD3]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ADD3_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP19]]
;
; NO-VP-LABEL: @load_factor_4_reverse(
; NO-VP-NEXT: entry:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
index 00c88a46..1aea6aa 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
@@ -26,18 +26,7 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV1]]
-; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV1]]
-; IF-EVL-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4
-; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i32 [[IV1]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT1]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY1]]
; IF-EVL: for.cond.cleanup:
; IF-EVL-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
index a03b430..e94e64f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
@@ -32,17 +32,6 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
-; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
-; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
-; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp uge i64 [[I_NEXT]], [[TC]]
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]]
; CHECK: [[EXIT_LOOPEXIT]]:
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[EXIT]]:
@@ -92,17 +81,6 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
-; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
-; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
-; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[TC]]
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]]
; CHECK: [[EXIT_LOOPEXIT]]:
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[EXIT]]:
@@ -152,17 +130,6 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
-; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
-; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
-; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[TC_ADD]]
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]]
; CHECK: [[EXIT_LOOPEXIT]]:
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[EXIT]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
index 58b4c53..b13c671 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
@@ -30,25 +30,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
-; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_011]]
-; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP23]], 0
-; IF-EVL-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; IF-EVL: if.then:
-; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_011]]
-; IF-EVL-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add i32 [[TMP23]], [[TMP24]]
-; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4
-; IF-EVL-NEXT: br label [[FOR_INC]]
-; IF-EVL: for.inc:
-; IF-EVL-NEXT: [[INC]] = add nuw nsw i64 [[I_011]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]]
+; IF-EVL-NEXT: br label [[FOR_INC:%.*]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
index 6c487ab..dcb7bf4 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
@@ -29,21 +29,9 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD]] = fadd float [[TMP17]], [[SUM_07]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[ADD_LCSSA]]
+; IF-EVL-NEXT: ret float [[TMP14]]
;
; NO-VP-LABEL: @fadd(
; NO-VP-NEXT: entry:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
index e14ff7c..7179e7d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
@@ -30,21 +30,9 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ADD_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP17]]
;
; NO-VP-LABEL: @add(
; NO-VP-NEXT: entry:
@@ -129,7 +117,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP4]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP6:%.*]] = mul <8 x i32> [[TMP4]], [[TMP5]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP6]])
@@ -147,7 +135,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[MUL]] = mul nsw i32 [[TMP0]], [[RDX]]
; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP6:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]]
@@ -233,24 +221,12 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[OR_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP17]]
;
; NO-VP-LABEL: @or(
; NO-VP-NEXT: entry:
@@ -332,24 +308,12 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[AND_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP17]]
;
; NO-VP-LABEL: @and(
; NO-VP-NEXT: entry:
@@ -431,24 +395,12 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[XOR_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP17]]
;
; NO-VP-LABEL: @xor(
; NO-VP-NEXT: entry:
@@ -532,25 +484,12 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP15]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]]
-; IF-EVL-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[SMIN_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP18]]
;
; NO-VP-LABEL: @smin(
; NO-VP-NEXT: entry:
@@ -638,25 +577,12 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> [[TMP15]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP19]], [[RDX]]
-; IF-EVL-NEXT: [[SMAX]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[SMAX_LCSSA:%.*]] = phi i32 [ [[SMAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[SMAX_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP18]]
;
; NO-VP-LABEL: @smax(
; NO-VP-NEXT: entry:
@@ -744,25 +670,12 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> [[TMP15]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP19]], [[RDX]]
-; IF-EVL-NEXT: [[UMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[UMIN_LCSSA:%.*]] = phi i32 [ [[UMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[UMIN_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP18]]
;
; NO-VP-LABEL: @umin(
; NO-VP-NEXT: entry:
@@ -850,25 +763,12 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP15]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP19]], [[RDX]]
-; IF-EVL-NEXT: [[UMAX]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[UMAX_LCSSA:%.*]] = phi i32 [ [[UMAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[UMAX_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[TMP18]]
;
; NO-VP-LABEL: @umax(
; NO-VP-NEXT: entry:
@@ -954,24 +854,12 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP14]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[ADD_LCSSA]]
+; IF-EVL-NEXT: ret float [[TMP17]]
;
; NO-VP-LABEL: @fadd(
; NO-VP-NEXT: entry:
@@ -1056,7 +944,7 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP4]] = fmul reassoc <8 x float> [[WIDE_LOAD2]], [[VEC_PHI1]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP6:%.*]] = fmul reassoc <8 x float> [[TMP4]], [[TMP5]]
; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v8f32(float 1.000000e+00, <8 x float> [[TMP6]])
@@ -1074,7 +962,7 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP0]], [[RDX]]
; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP24:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret float [[MUL_LCSSA]]
@@ -1162,25 +1050,12 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP15]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP19]], [[RDX]]
-; IF-EVL-NEXT: [[MIN]] = select i1 [[CMP]], float [[TMP19]], float [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[MIN_LCSSA]]
+; IF-EVL-NEXT: ret float [[TMP18]]
;
; NO-VP-LABEL: @fmin(
; NO-VP-NEXT: entry:
@@ -1268,25 +1143,12 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP15]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP19]], [[RDX]]
-; IF-EVL-NEXT: [[MAX]] = select i1 [[CMP]], float [[TMP19]], float [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[MAX_LCSSA]]
+; IF-EVL-NEXT: ret float [[TMP18]]
;
; NO-VP-LABEL: @fmax(
; NO-VP-NEXT: entry:
@@ -1375,7 +1237,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]])
@@ -1393,7 +1255,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]])
; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP30:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret float [[MIN_LCSSA]]
@@ -1483,7 +1345,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]])
@@ -1501,7 +1363,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]])
; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP32:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret float [[MAX_LCSSA]]
@@ -1590,26 +1452,12 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP20:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP17]])
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[RDX]])
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret float [[MULADD_LCSSA]]
+; IF-EVL-NEXT: ret float [[TMP20]]
;
; NO-VP-LABEL: @fmuladd(
; NO-VP-NEXT: entry:
@@ -1696,27 +1544,14 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP15]])
; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]]
; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP19]], i32 [[INV:%.*]], i32 [[START:%.*]]
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP20]], 3
-; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[RDX_SELECT]]
;
; NO-VP-LABEL: @anyof_icmp(
; NO-VP-NEXT: entry:
@@ -1801,27 +1636,14 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP15]])
; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]]
; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP19]], i32 [[INV:%.*]], i32 [[START:%.*]]
-; IF-EVL-NEXT: br label [[FOR_END:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP20]], 3.000000e+00
-; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]]
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; IF-EVL: for.end:
-; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ]
-; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]]
+; IF-EVL-NEXT: ret i32 [[RDX_SELECT]]
;
; NO-VP-LABEL: @anyof_fcmp(
; NO-VP-NEXT: entry:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index 5b9bc50..e70894b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -43,20 +43,7 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[LOOPEND:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1
-; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]]
-; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4
-; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]]
-; IF-EVL-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4
-; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1
-; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024
-; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]]
; IF-EVL: loopend:
; IF-EVL-NEXT: ret void
;
@@ -179,27 +166,7 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: [[TMP29:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[LOOPEND:%.*]]
-; IF-EVL: scalar.ph:
-; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ]
-; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1
-; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]]
-; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4
-; IF-EVL-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP]], 100
-; IF-EVL-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; IF-EVL: if.then:
-; IF-EVL-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i64 [[ADD]]
-; IF-EVL-NEXT: [[V:%.*]] = load i32, ptr [[GEPL1]], align 4
-; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]]
-; IF-EVL-NEXT: store i32 [[V]], ptr [[GEPS]], align 4
-; IF-EVL-NEXT: br label [[FOR_INC]]
-; IF-EVL: for.inc:
-; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1
-; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024
-; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]]
+; IF-EVL-NEXT: br label [[FOR_INC:%.*]]
; IF-EVL: loopend:
; IF-EVL-NEXT: ret void
;
@@ -351,22 +318,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: [[TMP32:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[LOOP:%.*]]
-; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; IF-EVL-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1
-; IF-EVL-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]]
-; IF-EVL-NEXT: [[Y:%.*]] = load i8, ptr [[GEP_B]], align 1
-; IF-EVL-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[C]], i64 [[IV]]
-; IF-EVL-NEXT: store i8 [[Y]], ptr [[GEP_C]], align 1
-; IF-EVL-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D]], i64 [[IV]]
-; IF-EVL-NEXT: store i8 [[Y]], ptr [[GEP_D]], align 1
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1
-; IF-EVL-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[IV]], 0
-; IF-EVL-NEXT: br i1 [[CMP_NOT]], label [[EXIT]], label [[LOOP]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index b13f97d..e1c62fe 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -31,19 +31,7 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[LOOP:%.*]]
-; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 8
-; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200
-; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 8
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
@@ -125,19 +113,7 @@ define void @test_may_clobber1(ptr %p) {
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[LOOP:%.*]]
-; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100
-; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
@@ -157,19 +133,7 @@ define void @test_may_clobber1(ptr %p) {
; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: middle.block:
-; NO-VP-NEXT: br label [[EXIT:%.*]]
-; NO-VP: scalar.ph:
; NO-VP-NEXT: br label [[LOOP:%.*]]
-; NO-VP: loop:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100
-; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
@@ -259,19 +223,7 @@ define void @test_may_clobber3(ptr %p) {
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[LOOP:%.*]]
-; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 10
-; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
@@ -291,19 +243,7 @@ define void @test_may_clobber3(ptr %p) {
; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: middle.block:
-; NO-VP-NEXT: br label [[EXIT:%.*]]
-; NO-VP: scalar.ph:
; NO-VP-NEXT: br label [[LOOP:%.*]]
-; NO-VP: loop:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 10
-; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
@@ -347,19 +287,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[LOOP:%.*]]
-; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192
-; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
@@ -446,19 +374,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[EXIT:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[LOOP:%.*]]
-; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
-; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024
-; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
-; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 3001
-; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; IF-EVL: exit:
; IF-EVL-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index 0bb7ad0..f804329 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -38,16 +38,6 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[TMP22:%.*]] = sub nuw nsw i64 1, [[IV1]]
-; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP22]]
-; CHECK-NEXT: store i64 0, ptr [[ARRAYIDX14]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 3
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index 5c89f21..c5319c6 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -31,20 +31,6 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
-; CHECK-NEXT: [[EXT_L:%.*]] = zext i16 [[L]] to i64
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[X]], [[EXT_L]]
-; CHECK-NEXT: [[TRUNC_AND:%.*]] = trunc i64 [[AND]] to i8
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -95,20 +81,6 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
-; CHECK-NEXT: [[EXT_L:%.*]] = sext i16 [[L]] to i64
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[X]], [[EXT_L]]
-; CHECK-NEXT: [[TRUNC_AND:%.*]] = trunc i64 [[AND]] to i8
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -151,21 +123,6 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = or i8 23, [[X]]
-; CHECK-NEXT: [[EXTRACT_T:%.*]] = trunc i8 [[TMP4]] to i1
-; CHECK-NEXT: br i1 [[EXTRACT_T]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: store i8 0, ptr [[DST]], align 1
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[ADD]] = add i8 [[F_039]], 1
-; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[F_039]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], 8
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -260,23 +217,6 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[T1]], [[T]]
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[X]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, ptr [[SRC]], i64 [[IDXPROM]]
-; CHECK-NEXT: [[RETVAL:%.*]] = load double, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: store double [[RETVAL]], ptr [[DST]], align 8
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[V]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
index 6efb035..000dc4a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
@@ -22,20 +22,6 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) {
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP_SRC1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV1]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[GEP_SRC1]], align 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
-; CHECK-NEXT: [[MUL16:%.*]] = mul i32 0, [[CONV]]
-; CHECK-NEXT: [[SHR35:%.*]] = lshr i32 [[MUL16]], 1
-; CHECK-NEXT: [[CONV36:%.*]] = trunc i32 [[SHR35]] to i8
-; CHECK-NEXT: store i8 [[CONV36]], ptr null, align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV1]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], 8
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index 9095d6e..bae97e5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -29,16 +29,6 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; SCALABLE: [[SCALAR_PH:.*]]:
-; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
-; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -97,16 +87,6 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; TF-SCALABLE: [[SCALAR_PH:.*]]:
-; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
-; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -292,22 +272,6 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; SCALABLE: [[SCALAR_PH:.*]]:
-; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
-; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
-; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]]
-; SCALABLE: [[DO_LOAD]]:
-; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
-; SCALABLE-NEXT: br label %[[LATCH]]
-; SCALABLE: [[LATCH]]:
-; SCALABLE-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[V]], %[[DO_LOAD]] ]
-; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -389,22 +353,6 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; TF-SCALABLE: [[SCALAR_PH:.*]]:
-; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
-; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
-; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]]
-; TF-SCALABLE: [[DO_LOAD]]:
-; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
-; TF-SCALABLE-NEXT: br label %[[LATCH]]
-; TF-SCALABLE: [[LATCH]]:
-; TF-SCALABLE-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[V]], %[[DO_LOAD]] ]
-; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; TF-SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -451,19 +399,9 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; SCALABLE-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; SCALABLE: [[SCALAR_PH:.*]]:
-; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1
-; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -519,19 +457,9 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; TF-SCALABLE: [[SCALAR_PH:.*]]:
-; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1
-; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -571,19 +499,9 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; SCALABLE: [[SCALAR_PH:.*]]:
-; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
-; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -639,19 +557,9 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; TF-SCALABLE: [[SCALAR_PH:.*]]:
-; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
-; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -700,19 +608,9 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; SCALABLE: [[SCALAR_PH:.*]]:
-; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8
-; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -781,19 +679,9 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; TF-SCALABLE: [[SCALAR_PH:.*]]:
-; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; TF-SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8
-; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -843,24 +731,9 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; SCALABLE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; SCALABLE: [[SCALAR_PH:.*]]:
-; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
-; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
-; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]]
-; SCALABLE: [[DO_STORE]]:
-; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
-; SCALABLE-NEXT: br label %[[LATCH]]
-; SCALABLE: [[LATCH]]:
-; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -939,24 +812,9 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; TF-SCALABLE: [[SCALAR_PH:.*]]:
-; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
-; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
-; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]]
-; TF-SCALABLE: [[DO_STORE]]:
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
-; TF-SCALABLE-NEXT: br label %[[LATCH]]
-; TF-SCALABLE: [[LATCH]]:
-; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -1002,19 +860,9 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; SCALABLE: [[SCALAR_PH:.*]]:
-; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
-; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -1070,19 +918,9 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
-; TF-SCALABLE: [[SCALAR_PH:.*]]:
-; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
-; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
index 8c67b4c..1676461 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
@@ -15,15 +15,6 @@ define void @foo(ptr %arg) #0 {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr [3 x i64], ptr [[ARG]], i64 0, i64 [[IV]]
-; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 3
-; CHECK-NEXT: br i1 [[COND]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -61,18 +52,8 @@ define i32 @test_remove_iv(i32 %start) #0 {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP5]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[START]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], 3
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 5
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
index 649ce60..0a64723 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
@@ -30,21 +30,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP22]]
-; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; IF-EVL: for.cond.cleanup:
; IF-EVL-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
index b0f0c39..b106f99 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll
@@ -25,11 +25,7 @@ define i32 @foo(ptr nocapture %A) {
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 poison
;
@@ -76,11 +72,7 @@ define i32 @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) {
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 poison
;
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
index 1d4cbc3..78c71fd 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
@@ -38,15 +38,6 @@ define void @test_scalar_steps_target_instruction_cost(ptr %dst) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 3
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], 22
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
index a423f06..02e82b4 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
@@ -91,23 +91,7 @@ define void @test(ptr %p, i40 %a) {
; CHECK: pred.store.continue30:
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SHL:%.*]] = shl i40 [[A]], 24
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i40 [[SHL]], 28
-; CHECK-NEXT: [[TRUNC:%.*]] = trunc i40 [[ASHR]] to i32
-; CHECK-NEXT: [[ICMP_EQ:%.*]] = icmp eq i32 [[TRUNC]], 0
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[ICMP_EQ]] to i32
-; CHECK-NEXT: [[ICMP_ULT:%.*]] = icmp ult i32 0, [[ZEXT]]
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[ICMP_ULT]], true
-; CHECK-NEXT: [[ICMP_SGT:%.*]] = icmp sgt i1 [[OR]], false
-; CHECK-NEXT: store i1 [[ICMP_SGT]], ptr [[P]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[IV_NEXT]], 10
-; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
index 3c788b2..ee84ef2 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
@@ -63,19 +63,7 @@ define void @func_21() {
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 6
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[LV:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @A, i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[LV]] = load i32, ptr [[A_PTR]], align 4
-; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[SCALAR_RECUR]], ptr [[B_PTR]], align 4
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 5
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll
index d40cb6e..cfb1805 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll
@@ -66,25 +66,6 @@ define void @test_scalar_iv_steps_used_by_replicate_and_first_lane_only_vpinst(p
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[MUL_IV:%.*]] = mul nsw i64 [[IV]], 4
-; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i8, ptr [[SRC_1]], i64 [[MUL_IV]]
-; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr [[GEP_SRC_1]], align 1
-; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[L_1]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[IV_OR:%.*]] = or disjoint i64 [[IV]], 4
-; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds [8 x i32], ptr @src, i64 0, i64 [[IV_OR]]
-; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC]], align 4
-; CHECK-NEXT: store i32 [[L_2]], ptr [[DST]], align 4
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
index 9dd7e9f..f65a9d7 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
@@ -22,19 +22,7 @@ define void @f1() {
; CHECK-NEXT: store <2 x ptr> <ptr @a, ptr @a>, ptr [[TMP1]], align 8
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[BB3:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[BB2:%.*]]
-; CHECK: bb2:
-; CHECK-NEXT: [[C_1_0:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[_TMP9:%.*]], [[BB2]] ]
-; CHECK-NEXT: [[_TMP1:%.*]] = zext i16 0 to i64
-; CHECK-NEXT: [[_TMP2:%.*]] = getelementptr [1 x %rec8], ptr @a, i16 0, i64 [[_TMP1]]
-; CHECK-NEXT: [[_TMP6:%.*]] = sext i16 [[C_1_0]] to i64
-; CHECK-NEXT: [[_TMP7:%.*]] = getelementptr [2 x ptr], ptr @b, i16 0, i64 [[_TMP6]]
-; CHECK-NEXT: store ptr [[_TMP2]], ptr [[_TMP7]], align 8
-; CHECK-NEXT: [[_TMP9]] = add nsw i16 [[C_1_0]], 1
-; CHECK-NEXT: [[_TMP11:%.*]] = icmp slt i16 [[_TMP9]], 2
-; CHECK-NEXT: br i1 [[_TMP11]], label [[BB2]], label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: ret void
;
@@ -102,25 +90,7 @@ define void @redundant_or_1(ptr %dst, i1 %c.0, i1 %c.1) {
; CHECK: pred.store.continue8:
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[THEN_1:%.*]]
-; CHECK: then.1:
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], true
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_0]], i1 false
-; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]]
-; CHECK: then.2:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]]
-; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -195,25 +165,7 @@ define void @redundant_or_2(ptr %dst, i1 %c.0, i1 %c.1) {
; CHECK: pred.store.continue8:
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]]
-; CHECK: then.1:
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2
-; CHECK-NEXT: [[OR:%.*]] = or i1 true, [[CMP]]
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1]], i1 false
-; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]]
-; CHECK: then.2:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]]
-; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -289,25 +241,7 @@ define void @redundant_and_1(ptr %dst, i1 %c.0, i1 %c.1) {
; CHECK: pred.store.continue8:
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]]
-; CHECK: then.1:
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], false
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1]], i1 false
-; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]]
-; CHECK: then.2:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]]
-; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -341,6 +275,23 @@ exit:
define void @redundant_and_2(ptr %dst, i1 %c.0, i1 %c.1) {
; CHECK-LABEL: @redundant_and_2(
; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
+; CHECK: loop.header:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: br i1 [[C_0:%.*]], label [[LOOP_LATCH]], label [[THEN_1:%.*]]
+; CHECK: then.1:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2
+; CHECK-NEXT: [[OR:%.*]] = and i1 false, [[CMP]]
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1:%.*]], i1 false
+; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]]
+; CHECK: then.2:
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[IV]]
+; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
+; CHECK-NEXT: br label [[LOOP_LATCH]]
+; CHECK: loop.latch:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[LOOP_HEADER]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll
index ee88abb..e0dd376 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll
@@ -92,24 +92,8 @@ define i64 @second_lshr_operand_zero_via_scev() {
; CHECK-NEXT: [[BIN_RDX:%.*]] = or <2 x i64> [[TMP11]], [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[BIN_RDX]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOPS:.*]]
-; CHECK: [[LOOPS]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOPS]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOPS]] ]
-; CHECK-NEXT: [[C:%.*]] = icmp eq i64 [[IV]], 0
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 0
-; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[TMP14]], [[EXT_0]]
-; CHECK-NEXT: [[CONV_1:%.*]] = zext i32 [[SHR]] to i64
-; CHECK-NEXT: [[RED_NEXT_V:%.*]] = select i1 [[C]], i64 [[AND]], i64 [[CONV_1]]
-; CHECK-NEXT: [[RED_NEXT]] = or i64 [[RED_NEXT_V]], [[RED]]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOPS]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOPS]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RES]]
+; CHECK-NEXT: ret i64 [[TMP13]]
;
entry:
%ext.0 = sext i8 0 to i32
diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
index 0ba885d..9453ad7 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
@@ -696,19 +696,9 @@ define i64 @live_in_known_1_via_scev() {
; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> [[VEC_PHI]])
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 3, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED_MUL]] = mul nsw i64 [[RED]], [[P_EXT]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[RED_MUL]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RES]]
+; CHECK-NEXT: ret i64 [[TMP3]]
;
entry:
%sel = select i1 false, i32 3, i32 0
@@ -753,22 +743,9 @@ define i64 @cost_loop_invariant_recipes(i1 %x, i64 %y) {
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> [[TMP3]])
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT_I_I_I:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 1, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[NOT_X:%.*]] = xor i1 [[X]], true
-; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[NOT_X]] to i64
-; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[Y]], [[EXT]]
-; CHECK-NEXT: [[RED_MUL]] = mul i64 [[SHL]], [[RED]]
-; CHECK-NEXT: [[IV_NEXT_I_I_I]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[RED_MUL_LCSSA:%.*]] = phi i64 [ [[RED_MUL]], [[LOOP]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RED_MUL_LCSSA]]
+; CHECK-NEXT: ret i64 [[TMP4]]
;
entry:
br label %loop
@@ -808,20 +785,9 @@ define i32 @narrowed_reduction(ptr %a, i1 %cmp) #0 {
; CHECK: middle.block:
; CHECK-NEXT: [[TMP20:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]])
; CHECK-NEXT: [[TMP21:%.*]] = zext i1 [[TMP20]] to i32
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP1:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[VEC_EPILOG_PH:%.*]] ], [ [[INC:%.*]], [[LOOP1]] ]
-; CHECK-NEXT: [[OR13:%.*]] = phi i32 [ 0, [[VEC_EPILOG_PH]] ], [ [[OR:%.*]], [[LOOP1]] ]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[OR13]], 1
-; CHECK-NEXT: [[OR]] = or i32 [[AND]], [[CONV]]
-; CHECK-NEXT: [[INC]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 16
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP1]]
; CHECK: exit:
-; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[LOOP1]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[OR_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP21]]
;
entry:
%conv = zext i1 %cmp to i32
diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
index 3d07eca..249efe1 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
@@ -39,30 +39,9 @@ define i1 @fn(ptr %nno) #0 {
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP12]])
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY20:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 10, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ]
-; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ]
-; CHECK-NEXT: [[REM4:%.*]] = and i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i64 [[REM4]], 0
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-NEXT: br i1 [[CMP21]], label [[IF_THEN22:%.*]], label [[FOR_INC35]]
-; CHECK: if.then:
-; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[TMP15]], 1
-; CHECK-NEXT: [[REM27:%.*]] = urem i32 [[MUL]], 10
-; CHECK-NEXT: br label [[FOR_INC35]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[REM27_PN:%.*]] = phi i32 [ [[REM27]], [[IF_THEN22]] ], [ [[TMP15]], [[FOR_BODY20]] ]
-; CHECK-NEXT: [[SUM_1]] = or i32 [[REM27_PN]], [[SUM_01]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
-; CHECK-NEXT: [[CMP19_NOT:%.*]] = icmp eq i64 [[INDVARS_IV]], 0
-; CHECK-NEXT: br i1 [[CMP19_NOT]], label [[EXIT]], label [[FOR_BODY20]]
+; CHECK-NEXT: br label [[FOR_INC35:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_1]], [[FOR_INC35]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[CMP41:%.*]] = icmp eq i32 [[SUM_1_LCSSA]], 0
+; CHECK-NEXT: [[CMP41:%.*]] = icmp eq i32 [[TMP14]], 0
; CHECK-NEXT: ret i1 [[CMP41]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
index d0c311eb..cc84fab 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
@@ -495,18 +495,7 @@ define void @test_first_order_recurrence_tried_to_scalarized(ptr %dst, i1 %c, i3
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 4, [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[FOR]]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[IV]]
-; CHECK-NEXT: store i32 [[SUB]], ptr [[GEP_DST]], align 4
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
index 9528510..2f33e11 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
@@ -45,7 +45,8 @@ define void @foo1(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n
; AVX512-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AVX512: middle.block:
; AVX512-NEXT: br label [[FOR_END:%.*]]
-; AVX512: scalar.ph:
+; AVX512: for.end:
+; AVX512-NEXT: ret void
;
; FVW2-LABEL: @foo1(
; FVW2-NEXT: entry:
@@ -70,7 +71,8 @@ define void @foo1(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n
; FVW2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; FVW2: middle.block:
; FVW2-NEXT: br label [[FOR_END:%.*]]
-; FVW2: scalar.ph:
+; FVW2: for.end:
+; FVW2-NEXT: ret void
;
entry:
br label %for.body
@@ -137,7 +139,8 @@ define void @foo2(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n
; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; AVX512: middle.block:
; AVX512-NEXT: br label [[FOR_END:%.*]]
-; AVX512: scalar.ph:
+; AVX512: for.end:
+; AVX512-NEXT: ret void
;
; FVW2-LABEL: @foo2(
; FVW2-NEXT: entry:
@@ -182,7 +185,8 @@ define void @foo2(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n
; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; FVW2: middle.block:
; FVW2-NEXT: br label [[FOR_END:%.*]]
-; FVW2: scalar.ph:
+; FVW2: for.end:
+; FVW2-NEXT: ret void
;
entry:
br label %for.body
@@ -250,7 +254,8 @@ define void @foo3(ptr noalias %in, ptr noalias %out, ptr noalias %trigger) {
; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; AVX512: middle.block:
; AVX512-NEXT: br label [[FOR_END:%.*]]
-; AVX512: scalar.ph:
+; AVX512: for.end:
+; AVX512-NEXT: ret void
;
; FVW2-LABEL: @foo3(
; FVW2-NEXT: entry:
@@ -295,7 +300,8 @@ define void @foo3(ptr noalias %in, ptr noalias %out, ptr noalias %trigger) {
; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; FVW2: middle.block:
; FVW2-NEXT: br label [[FOR_END:%.*]]
-; FVW2: scalar.ph:
+; FVW2: for.end:
+; FVW2-NEXT: ret void
;
entry:
br label %for.body
@@ -350,7 +356,8 @@ define void @foo2_addrspace(ptr addrspace(1) noalias %in, ptr addrspace(1) noali
; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; AVX512: middle.block:
; AVX512-NEXT: br label [[FOR_END:%.*]]
-; AVX512: scalar.ph:
+; AVX512: for.end:
+; AVX512-NEXT: ret void
;
; FVW2-LABEL: @foo2_addrspace(
; FVW2-NEXT: entry:
@@ -395,7 +402,8 @@ define void @foo2_addrspace(ptr addrspace(1) noalias %in, ptr addrspace(1) noali
; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; FVW2: middle.block:
; FVW2-NEXT: br label [[FOR_END:%.*]]
-; FVW2: scalar.ph:
+; FVW2: for.end:
+; FVW2-NEXT: ret void
;
entry:
br label %for.body
@@ -449,7 +457,8 @@ define void @foo2_addrspace2(ptr addrspace(1) noalias %in, ptr addrspace(0) noal
; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; AVX512: middle.block:
; AVX512-NEXT: br label [[FOR_END:%.*]]
-; AVX512: scalar.ph:
+; AVX512: for.end:
+; AVX512-NEXT: ret void
;
; FVW2-LABEL: @foo2_addrspace2(
; FVW2-NEXT: entry:
@@ -494,7 +503,8 @@ define void @foo2_addrspace2(ptr addrspace(1) noalias %in, ptr addrspace(0) noal
; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; FVW2: middle.block:
; FVW2-NEXT: br label [[FOR_END:%.*]]
-; FVW2: scalar.ph:
+; FVW2: for.end:
+; FVW2-NEXT: ret void
;
entry:
br label %for.body
@@ -548,7 +558,8 @@ define void @foo2_addrspace3(ptr addrspace(0) noalias %in, ptr addrspace(1) noal
; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; AVX512: middle.block:
; AVX512-NEXT: br label [[FOR_END:%.*]]
-; AVX512: scalar.ph:
+; AVX512: for.end:
+; AVX512-NEXT: ret void
;
; FVW2-LABEL: @foo2_addrspace3(
; FVW2-NEXT: entry:
@@ -593,7 +604,8 @@ define void @foo2_addrspace3(ptr addrspace(0) noalias %in, ptr addrspace(1) noal
; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; FVW2: middle.block:
; FVW2-NEXT: br label [[FOR_END:%.*]]
-; FVW2: scalar.ph:
+; FVW2: for.end:
+; FVW2-NEXT: ret void
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll
index b2d587c..877fcd4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll
@@ -90,29 +90,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) {
; SSE: middle.block:
; SSE-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x double> [[PREDPHI3]], [[PREDPHI]]
; SSE-NEXT: [[TMP11:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> [[BIN_RDX]])
-; SSE-NEXT: br label [[DONE:%.*]]
-; SSE: scalar.ph:
-; SSE-NEXT: br label [[LOOP:%.*]]
-; SSE: loop:
-; SSE-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ]
-; SSE-NEXT: [[TOT:%.*]] = phi double [ 0.000000e+00, [[SCALAR_PH]] ], [ [[TOT_NEXT:%.*]], [[NEXT_ITER]] ]
-; SSE-NEXT: [[ADDR:%.*]] = getelementptr double, ptr [[ARR]], i32 [[I]]
-; SSE-NEXT: [[NEXTVAL:%.*]] = load double, ptr [[ADDR]], align 8
-; SSE-NEXT: [[TST:%.*]] = fcmp fast une double [[NEXTVAL]], 4.200000e+01
-; SSE-NEXT: br i1 [[TST]], label [[DO_ADD:%.*]], label [[NO_ADD:%.*]]
-; SSE: do.add:
-; SSE-NEXT: [[TOT_NEW:%.*]] = fadd fast double [[TOT]], [[NEXTVAL]]
-; SSE-NEXT: br label [[NEXT_ITER]]
-; SSE: no.add:
-; SSE-NEXT: br label [[NEXT_ITER]]
-; SSE: next.iter:
-; SSE-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ]
-; SSE-NEXT: [[I_NEXT]] = add i32 [[I]], 1
-; SSE-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32
-; SSE-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]]
+; SSE-NEXT: br label [[NEXT_ITER:%.*]]
; SSE: done:
-; SSE-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
-; SSE-NEXT: ret double [[TOT_NEXT_LCSSA]]
+; SSE-NEXT: ret double [[TMP11]]
;
; AVX-LABEL: @sumIfVector(
; AVX-NEXT: entry:
@@ -153,29 +133,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) {
; AVX-NEXT: [[BIN_RDX10:%.*]] = fadd fast <4 x double> [[PREDPHI8]], [[BIN_RDX]]
; AVX-NEXT: [[BIN_RDX11:%.*]] = fadd fast <4 x double> [[PREDPHI9]], [[BIN_RDX10]]
; AVX-NEXT: [[TMP21:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[BIN_RDX11]])
-; AVX-NEXT: br label [[DONE:%.*]]
-; AVX: scalar.ph:
-; AVX-NEXT: br label [[LOOP:%.*]]
-; AVX: loop:
-; AVX-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ]
-; AVX-NEXT: [[TOT:%.*]] = phi double [ 0.000000e+00, [[SCALAR_PH]] ], [ [[TOT_NEXT:%.*]], [[NEXT_ITER]] ]
-; AVX-NEXT: [[ADDR:%.*]] = getelementptr double, ptr [[ARR]], i32 [[I]]
-; AVX-NEXT: [[NEXTVAL:%.*]] = load double, ptr [[ADDR]], align 8
-; AVX-NEXT: [[TST:%.*]] = fcmp fast une double [[NEXTVAL]], 4.200000e+01
-; AVX-NEXT: br i1 [[TST]], label [[DO_ADD:%.*]], label [[NO_ADD:%.*]]
-; AVX: do.add:
-; AVX-NEXT: [[TOT_NEW:%.*]] = fadd fast double [[TOT]], [[NEXTVAL]]
-; AVX-NEXT: br label [[NEXT_ITER]]
-; AVX: no.add:
-; AVX-NEXT: br label [[NEXT_ITER]]
-; AVX: next.iter:
-; AVX-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ]
-; AVX-NEXT: [[I_NEXT]] = add i32 [[I]], 1
-; AVX-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32
-; AVX-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]]
+; AVX-NEXT: br label [[NEXT_ITER:%.*]]
; AVX: done:
-; AVX-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
-; AVX-NEXT: ret double [[TOT_NEXT_LCSSA]]
+; AVX-NEXT: ret double [[TMP21]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
index 27eef01..a19b294 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
@@ -409,21 +409,9 @@ define i16 @iv_and_step_trunc() {
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <2 x i16> [[TMP2]], i32 0
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[IV]] to i16
-; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[IV_NEXT]] to i16
-; CHECK-NEXT: [[REC_NEXT]] = mul i16 [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i16 [[REC_LCSSA]]
+; CHECK-NEXT: ret i16 [[VECTOR_RECUR_EXTRACT_FOR_PHI]]
;
entry:
br label %loop
@@ -612,16 +600,7 @@ define void @wide_iv_trunc(ptr %dst, i64 %N) {
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-NEXT: store i32 [[IV_TRUNC]], ptr [[DST]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT_LOOPEXIT]], label [[LOOP]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll
index 91c7e7a3..2f96278 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll
@@ -38,36 +38,6 @@ define void @test_free_instructions_feeding_geps_for_interleave_groups(ptr noali
; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[P_INVAR]], align 4
-; CHECK-NEXT: [[IV_MUL:%.*]] = shl i64 [[IV]], 2
-; CHECK-NEXT: [[GEP_DST_19:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[IV_MUL]]
-; CHECK-NEXT: store float [[L_0]], ptr [[GEP_DST_19]], align 4
-; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[P_INVAR]], align 4
-; CHECK-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[IV_MUL]], 1
-; CHECK-NEXT: [[GEP_DST_119:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_1]]
-; CHECK-NEXT: store float [[L_1]], ptr [[GEP_DST_119]], align 4
-; CHECK-NEXT: [[ADD_2:%.*]] = or disjoint i64 [[IV_MUL]], 2
-; CHECK-NEXT: [[GEP_DST_129:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_2]]
-; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_129]], align 4
-; CHECK-NEXT: [[ADD_3:%.*]] = or disjoint i64 [[IV_MUL]], 3
-; CHECK-NEXT: [[GEP_DST_140:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_3]]
-; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_140]], align 4
-; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[P_INVAR]], align 4
-; CHECK-NEXT: [[GEP_DST_247:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[IV_MUL]]
-; CHECK-NEXT: store float [[L_2]], ptr [[GEP_DST_247]], align 4
-; CHECK-NEXT: [[GEP_DST_255:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[ADD_1]]
-; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_255]], align 4
-; CHECK-NEXT: [[GEP_DST_265:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[ADD_2]]
-; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_265]], align 4
-; CHECK-NEXT: [[GEP_DST_276:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[ADD_3]]
-; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_276]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -504,17 +474,6 @@ define void @interleave_store_double_i64(ptr %dst) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]], i32 1
-; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_1]], align 8
-; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_0]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -616,17 +575,6 @@ define void @interleave_store_i64_double_2(ptr %dst) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_0]], align 8
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]], i32 1
-; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
index 228bc80..e2329fe 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
@@ -34,13 +34,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; SSE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; SSE-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SSE: middle.block:
-; SSE-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; SSE: scalar.ph:
; SSE-NEXT: br label [[FOR_BODY:%.*]]
; SSE: for.cond.cleanup:
; SSE-NEXT: ret void
-; SSE: for.body:
-; SSE-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
;
; AVX1-LABEL: @foo(
; AVX1-NEXT: entry:
@@ -88,13 +84,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; AVX1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; AVX1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AVX1: middle.block:
-; AVX1-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; AVX1: scalar.ph:
; AVX1-NEXT: br label [[FOR_BODY:%.*]]
; AVX1: for.cond.cleanup:
; AVX1-NEXT: ret void
-; AVX1: for.body:
-; AVX1-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
;
; AVX2-LABEL: @foo(
; AVX2-NEXT: entry:
@@ -142,13 +134,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; AVX2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; AVX2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AVX2: middle.block:
-; AVX2-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; AVX2: scalar.ph:
; AVX2-NEXT: br label [[FOR_BODY:%.*]]
; AVX2: for.cond.cleanup:
; AVX2-NEXT: ret void
-; AVX2: for.body:
-; AVX2-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
;
; ATOM-LABEL: @foo(
; ATOM-NEXT: entry:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
index 5853e91..5d40e6a 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
@@ -409,16 +409,6 @@ define void @test_store_of_final_reduction_value(i64 %x, ptr %dst) {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> [[TMP0]])
; CHECK-NEXT: store i64 [[TMP1]], ptr [[DST]], align 8
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV4:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED_NEXT]] = mul i64 [[RED]], [[X]]
-; CHECK-NEXT: store i64 [[RED_NEXT]], ptr [[DST]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV4]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV4]], 1
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
index 9e0ef73..2a8c698 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
@@ -63,27 +63,9 @@ define i32 @test_explicit_pred(i64 %len) {
; CHECK-NEXT: [[BIN_RDX13:%.*]] = add <4 x i32> [[TMP18]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX14:%.*]] = add <4 x i32> [[TMP19]], [[BIN_RDX13]]
; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX14]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EARLYCND:%.*]] = icmp slt i64 [[IV]], [[LEN]]
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP21]]
;
entry:
%alloca = alloca [4096 x i32]
@@ -212,28 +194,9 @@ define i32 @test_explicit_pred_generic(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP77]]
;
entry:
%alloca = alloca [4096 x i32]
@@ -390,27 +353,9 @@ define i32 @test_invariant_address(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP98]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP99]], [[BIN_RDX7]]
; CHECK-NEXT: [[TMP101:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ALLOCA]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP101]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP101]]
;
entry:
%alloca = alloca [4096 x i32]
@@ -659,28 +604,9 @@ define i32 @test_step_narrower_than_access(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX37:%.*]] = add <4 x i32> [[TMP146]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX38:%.*]] = add <4 x i32> [[TMP147]], [[BIN_RDX37]]
; CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX38]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR_I16P:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR_I16P]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP149]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP149]]
;
entry:
%alloca = alloca [4096 x i32]
@@ -974,28 +900,9 @@ define i32 @test_non_zero_start(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP77]]
;
entry:
%alloca = alloca [4096 x i32]
@@ -1216,28 +1123,9 @@ define i32 @test_non_unit_stride(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP114]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP115]], [[BIN_RDX7]]
; CHECK-NEXT: [[TMP117:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 2
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4093
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP117]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP117]]
;
entry:
%alloca = alloca [4096 x i32]
@@ -1366,28 +1254,9 @@ define i32 @neg_off_by_many(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP77]]
;
entry:
%alloca = alloca [1024 x i32]
@@ -1516,28 +1385,9 @@ define i32 @neg_off_by_one_iteration(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP77]]
;
entry:
%alloca = alloca [4095 x i32]
@@ -1666,28 +1516,9 @@ define i32 @neg_off_by_one_byte(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP77]]
;
entry:
%alloca = alloca [16383 x i8]
@@ -1985,28 +1816,9 @@ define i32 @test_allocsize(i64 %len, ptr %test_base) nofree nosync {
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP77]]
;
entry:
%allocation = call nonnull ptr @my_alloc(i32 16384)
@@ -2136,28 +1948,9 @@ define i32 @test_allocsize_array(i64 %len, ptr %test_base) nofree nosync {
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP77]]
;
entry:
%allocation = call nonnull ptr @my_array_alloc(i32 4096, i32 4)
@@ -2297,28 +2090,9 @@ define i32 @test_allocsize_cond_deref(i1 %allzero, ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP77]]
;
entry:
%allocation = call nonnull ptr @my_alloc(i32 16384)
diff --git a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
index d0991a5..e23f8a9 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
@@ -1199,19 +1199,7 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; O1VEC2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
; O1VEC2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; O1VEC2: middle.block:
-; O1VEC2-NEXT: br label [[FOR_END:%.*]]
-; O1VEC2: scalar.ph:
; O1VEC2-NEXT: br label [[FOR_BODY:%.*]]
-; O1VEC2: for.body:
-; O1VEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; O1VEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV]]
-; O1VEC2-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; O1VEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[N]]
-; O1VEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]]
-; O1VEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
-; O1VEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; O1VEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; O1VEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]]
; O1VEC2: for.end:
; O1VEC2-NEXT: [[TMP11:%.*]] = load i32, ptr [[A]], align 4
; O1VEC2-NEXT: ret i32 [[TMP11]]
@@ -1239,19 +1227,7 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
; OzVEC2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
; OzVEC2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; OzVEC2: middle.block:
-; OzVEC2-NEXT: br label [[FOR_END:%.*]]
-; OzVEC2: scalar.ph:
; OzVEC2-NEXT: br label [[FOR_BODY:%.*]]
-; OzVEC2: for.body:
-; OzVEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; OzVEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV]]
-; OzVEC2-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; OzVEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[N]]
-; OzVEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]]
-; OzVEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
-; OzVEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; OzVEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64
-; OzVEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]]
; OzVEC2: for.end:
; OzVEC2-NEXT: [[TMP11:%.*]] = load i32, ptr [[A]], align 4
; OzVEC2-NEXT: ret i32 [[TMP11]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
index fc37e5f..e1140b5 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
@@ -32,18 +32,6 @@ define i32 @foo_optsize() #0 {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1
-; CHECK-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[INC]] = add nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret i32 0
;
@@ -69,18 +57,6 @@ define i32 @foo_optsize() #0 {
; AUTOVF-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AUTOVF: [[MIDDLE_BLOCK]]:
; AUTOVF-NEXT: br label %[[FOR_END:.*]]
-; AUTOVF: [[SCALAR_PH:.*]]:
-; AUTOVF-NEXT: br label %[[FOR_BODY:.*]]
-; AUTOVF: [[FOR_BODY]]:
-; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
-; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
-; AUTOVF-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1
-; AUTOVF-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1
-; AUTOVF-NEXT: [[INC]] = add nsw i32 [[I_08]], 1
-; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202
-; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; AUTOVF: [[FOR_END]]:
; AUTOVF-NEXT: ret i32 0
;
@@ -128,18 +104,6 @@ define i32 @foo_minsize() #1 {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1
-; CHECK-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[INC]] = add nsw i32 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret i32 0
;
@@ -165,18 +129,6 @@ define i32 @foo_minsize() #1 {
; AUTOVF-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; AUTOVF: [[MIDDLE_BLOCK]]:
; AUTOVF-NEXT: br label %[[FOR_END:.*]]
-; AUTOVF: [[SCALAR_PH:.*]]:
-; AUTOVF-NEXT: br label %[[FOR_BODY:.*]]
-; AUTOVF: [[FOR_BODY]]:
-; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
-; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
-; AUTOVF-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1
-; AUTOVF-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1
-; AUTOVF-NEXT: [[INC]] = add nsw i32 [[I_08]], 1
-; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202
-; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; AUTOVF: [[FOR_END]]:
; AUTOVF-NEXT: ret i32 0
;
@@ -226,18 +178,6 @@ define void @scev4stride1(ptr noalias nocapture %a, ptr noalias nocapture readon
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_07]], [[K]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[MUL]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_07]]
-; CHECK-NEXT: store i32 [[TMP6]], ptr [[ARRAYIDX1]], align 4
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]]
; CHECK: [[FOR_END_LOOPEXIT]]:
; CHECK-NEXT: ret void
;
@@ -263,18 +203,6 @@ define void @scev4stride1(ptr noalias nocapture %a, ptr noalias nocapture readon
; AUTOVF-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; AUTOVF: [[MIDDLE_BLOCK]]:
; AUTOVF-NEXT: br label %[[FOR_END_LOOPEXIT:.*]]
-; AUTOVF: [[SCALAR_PH:.*]]:
-; AUTOVF-NEXT: br label %[[FOR_BODY:.*]]
-; AUTOVF: [[FOR_BODY]]:
-; AUTOVF-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; AUTOVF-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_07]], [[K]]
-; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[MUL]]
-; AUTOVF-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; AUTOVF-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_07]]
-; AUTOVF-NEXT: store i32 [[TMP6]], ptr [[ARRAYIDX1]], align 4
-; AUTOVF-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1
-; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 256
-; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]]
; AUTOVF: [[FOR_END_LOOPEXIT]]:
; AUTOVF-NEXT: ret void
;
@@ -431,14 +359,6 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72
-; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8
-; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -475,14 +395,6 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 {
; AUTOVF-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; AUTOVF: [[MIDDLE_BLOCK]]:
; AUTOVF-NEXT: br label %[[EXIT:.*]]
-; AUTOVF: [[SCALAR_PH:.*]]:
-; AUTOVF-NEXT: br label %[[LOOP:.*]]
-; AUTOVF: [[LOOP]]:
-; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
-; AUTOVF-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72
-; AUTOVF-NEXT: store ptr null, ptr [[PTR_IV]], align 8
-; AUTOVF-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
-; AUTOVF-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; AUTOVF: [[EXIT]]:
; AUTOVF-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
index 65f8487..5d76dfb 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
@@ -108,11 +108,7 @@ define void @parallel_loop(ptr nocapture %a, ptr nocapture %b) nounwind uwtable
; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll b/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll
index 62eacf6..619693a 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll
@@ -104,23 +104,8 @@ define i8 @pr141968(i1 %cond, i8 %v) {
; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: br i1 [[COND]], label %[[LOOP_LATCH]], label %[[COND_FALSE:.*]]
-; CHECK: [[COND_FALSE]]:
-; CHECK-NEXT: [[SDIV:%.*]] = sdiv i16 [[SEXT]], [[ZEXT_TRUE]]
-; CHECK-NEXT: [[SDIV_TRUNC:%.*]] = trunc i16 [[SDIV]] to i8
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[RET:%.*]] = phi i8 [ [[SDIV_TRUNC]], %[[COND_FALSE]] ], [ 0, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i8 [[IV_NEXT]], 0
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RET_LCSSA:%.*]] = phi i8 [ [[RET]], %[[LOOP_LATCH]] ], [ [[PREDPHI]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i8 [[RET_LCSSA]]
+; CHECK-NEXT: ret i8 [[PREDPHI]]
;
entry:
%zext.true = zext i1 true to i16
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll b/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll
index 972164f..47db49c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll
@@ -16,26 +16,13 @@ define void @small_tc(ptr noalias nocapture %A, ptr noalias nocapture readonly %
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP0:%.*]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x float>, ptr [[TMP2:%.*]], align 4, !llvm.access.group [[ACC_GRP0]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[B:%.*]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x float>, ptr [[A:%.*]], align 4, !llvm.access.group [[ACC_GRP0]]
; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <8 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
-; CHECK-NEXT: store <8 x float> [[TMP4]], ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP0]]
+; CHECK-NEXT: store <8 x float> [[TMP4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP0]]
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP0]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP6]], [[TMP7]]
-; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll b/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll
index 0098065..e7f56a4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll
@@ -43,26 +43,8 @@ define ptr @test(ptr noalias %src, ptr noalias %dst) {
; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV]], 0
-; CHECK-NEXT: br i1 [[CMP_1]], label %[[LOOP_LATCH]], label %[[THEN:.*]]
-; CHECK: [[THEN]]:
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[M:%.*]] = phi i32 [ [[L]], %[[THEN]] ], [ 0, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i32 [[M]], ptr [[GEP_DST]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
-; CHECK-NEXT: [[CMP_2:%.*]] = icmp slt i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[CMP_2]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[GEP_LCSSA:%.*]] = phi ptr [ [[GEP_SRC]], %[[LOOP_LATCH]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret ptr [[GEP_LCSSA]]
+; CHECK-NEXT: ret ptr [[TMP2]]
;
entry:
br label %loop.header
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
index 3922796..3616379 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
@@ -39,23 +39,7 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF0:![0-9]+]], !llvm.loop [[LOOP1:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[BB6:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 99, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 1
-; CHECK-NEXT: [[ICMP17:%.*]] = icmp eq i64 [[AND]], 0
-; CHECK-NEXT: br i1 [[ICMP17]], label [[BB18:%.*]], label [[LOOP_LATCH]], !prof [[PROF5:![0-9]+]]
-; CHECK: bb18:
-; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1
-; CHECK-NEXT: [[GETELEMENTPTR19:%.*]] = getelementptr inbounds i64, ptr [[ARR]], i64 [[OR]]
-; CHECK-NEXT: store i64 1, ptr [[GETELEMENTPTR19]], align 8
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; CHECK-NEXT: [[ICMP22:%.*]] = icmp eq i64 [[IV_NEXT]], 90
-; CHECK-NEXT: br i1 [[ICMP22]], label [[BB6]], label [[LOOP_HEADER]], !prof [[PROF6:![0-9]+]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: bb6:
; CHECK-NEXT: ret void
;
@@ -99,6 +83,4 @@ attributes #0 = {"target-cpu"="haswell" "target-features"="+avx2" }
; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[META4]] = !{!"llvm.loop.estimated_trip_count", i32 24}
-; CHECK: [[PROF5]] = !{!"branch_weights", i32 1, i32 1}
-; CHECK: [[PROF6]] = !{!"branch_weights", i32 1, i32 95}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
index 2bc3a97..f066000 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
@@ -71,23 +71,11 @@ define float @reduction_sum_float_fastmath(i32 %n, ptr %array) {
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[BIN_RDX]])
-; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ADDRESS:%.*]] = getelementptr float, ptr [[ARRAY]], i32 [[IDX]]
-; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4
-; CHECK-NEXT: [[SUM_INC]] = fadd fast float [[SUM]], [[VALUE]]
-; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1
-; CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096
-; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]]
; CHECK: loop.exit.loopexit:
-; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[LOOP_EXIT]]
; CHECK: loop.exit:
-; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ]
+; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ]
; CHECK-NEXT: ret float [[SUM_LCSSA]]
;
entry:
@@ -134,23 +122,11 @@ define float @reduction_sum_float_only_reassoc(i32 %n, ptr %array) {
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <4 x float> [[TMP7]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = call reassoc float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[BIN_RDX]])
-; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ -0.000000e+00, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ADDRESS:%.*]] = getelementptr float, ptr [[ARRAY]], i32 [[IDX]]
-; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4
-; CHECK-NEXT: [[SUM_INC]] = fadd reassoc float [[SUM]], [[VALUE]]
-; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1
-; CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096
-; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]]
; CHECK: loop.exit.loopexit:
-; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[LOOP_EXIT]]
; CHECK: loop.exit:
-; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ]
+; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ]
; CHECK-NEXT: ret float [[SUM_LCSSA]]
;
entry:
@@ -197,23 +173,11 @@ define float @reduction_sum_float_only_reassoc_and_contract(i32 %n, ptr %array)
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc contract <4 x float> [[TMP7]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = call reassoc contract float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[BIN_RDX]])
-; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ -0.000000e+00, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ADDRESS:%.*]] = getelementptr float, ptr [[ARRAY]], i32 [[IDX]]
-; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4
-; CHECK-NEXT: [[SUM_INC]] = fadd reassoc contract float [[SUM]], [[VALUE]]
-; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1
-; CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096
-; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]]
; CHECK: loop.exit.loopexit:
-; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[LOOP_EXIT]]
; CHECK: loop.exit:
-; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ]
+; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ]
; CHECK-NEXT: ret float [[SUM_LCSSA]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll
index 90f3df5..70b05ac 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll
@@ -50,23 +50,6 @@ define void @smax_call_uniform(ptr %dst, i64 %x) {
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: [[REM1:%.*]] = urem i64 [[MUL]], [[X]]
-; CHECK-NEXT: [[SMAX:%.*]] = tail call i64 @llvm.smax.i64(i64 [[REM1]], i64 0)
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1, %[[LOOP_HEADER]] ], [ [[SMAX]], %[[ELSE]] ]
-; CHECK-NEXT: [[IV_NEXT:%.*]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IV_NEXT]]
-; CHECK-NEXT: store i64 0, ptr [[GEP1]], align 8
-; CHECK-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT1]], 1024
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
new file mode 100644
index 0000000..8784873
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
@@ -0,0 +1,460 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 6
+; RUN: opt -p loop-vectorize -mtriple=x86_64-linux-gnu -S %s | FileCheck --check-prefix=I64 %s
+; RUN: opt -p loop-vectorize -mtriple=i386-pc-linux-gnu -S %s | FileCheck --check-prefix=I32 %s
+
+
+define void @test_store_initially_interleave(i32 %n, ptr noalias %src) #0 {
+; I64-LABEL: define void @test_store_initially_interleave(
+; I64-SAME: i32 [[N:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+; I64-NEXT: [[ITER_CHECK:.*:]]
+; I64-NEXT: [[TMP4:%.*]] = add i32 [[N]], 1
+; I64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP4]], 4
+; I64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; I64: [[VECTOR_SCEVCHECK]]:
+; I64-NEXT: [[TMP1:%.*]] = icmp slt i32 [[N]], 0
+; I64-NEXT: br i1 [[TMP1]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; I64: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; I64-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i32 [[TMP4]], 16
+; I64-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; I64: [[VECTOR_PH]]:
+; I64-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP4]], 16
+; I64-NEXT: [[TMP2:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
+; I64-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 16, i32 [[N_MOD_VF]]
+; I64-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP4]], [[TMP3]]
+; I64-NEXT: br label %[[VECTOR_BODY:.*]]
+; I64: [[VECTOR_BODY]]:
+; I64-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I64-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I64-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
+; I64-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i32> [[STEP_ADD]], splat (i32 4)
+; I64-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i32> [[STEP_ADD_2]], splat (i32 4)
+; I64-NEXT: [[IV:%.*]] = add i32 [[INDEX]], 0
+; I64-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1
+; I64-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 2
+; I64-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 3
+; I64-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 4
+; I64-NEXT: [[TMP9:%.*]] = add i32 [[INDEX]], 5
+; I64-NEXT: [[TMP10:%.*]] = add i32 [[INDEX]], 6
+; I64-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 7
+; I64-NEXT: [[TMP12:%.*]] = add i32 [[INDEX]], 8
+; I64-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 9
+; I64-NEXT: [[TMP14:%.*]] = add i32 [[INDEX]], 10
+; I64-NEXT: [[TMP15:%.*]] = add i32 [[INDEX]], 11
+; I64-NEXT: [[TMP16:%.*]] = add i32 [[INDEX]], 12
+; I64-NEXT: [[TMP17:%.*]] = add i32 [[INDEX]], 13
+; I64-NEXT: [[TMP18:%.*]] = add i32 [[INDEX]], 14
+; I64-NEXT: [[TMP19:%.*]] = add i32 [[INDEX]], 15
+; I64-NEXT: [[TMP20:%.*]] = uitofp <4 x i32> [[VEC_IND]] to <4 x double>
+; I64-NEXT: [[TMP21:%.*]] = uitofp <4 x i32> [[STEP_ADD]] to <4 x double>
+; I64-NEXT: [[TMP22:%.*]] = uitofp <4 x i32> [[STEP_ADD_2]] to <4 x double>
+; I64-NEXT: [[TMP23:%.*]] = uitofp <4 x i32> [[STEP_ADD_3]] to <4 x double>
+; I64-NEXT: [[ADD_PTR_I:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[IV]]
+; I64-NEXT: [[TMP25:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP5]]
+; I64-NEXT: [[TMP26:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP6]]
+; I64-NEXT: [[TMP27:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP7]]
+; I64-NEXT: [[TMP28:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP8]]
+; I64-NEXT: [[TMP29:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP9]]
+; I64-NEXT: [[TMP30:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP10]]
+; I64-NEXT: [[TMP31:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP11]]
+; I64-NEXT: [[TMP32:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP12]]
+; I64-NEXT: [[TMP33:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP13]]
+; I64-NEXT: [[TMP34:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP14]]
+; I64-NEXT: [[TMP35:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP15]]
+; I64-NEXT: [[TMP36:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP16]]
+; I64-NEXT: [[TMP37:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP17]]
+; I64-NEXT: [[TMP38:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP18]]
+; I64-NEXT: [[TMP39:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP19]]
+; I64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADD_PTR_I]], align 4
+; I64-NEXT: [[TMP41:%.*]] = load ptr, ptr [[TMP25]], align 4
+; I64-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP26]], align 4
+; I64-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP27]], align 4
+; I64-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP28]], align 4
+; I64-NEXT: [[TMP45:%.*]] = load ptr, ptr [[TMP29]], align 4
+; I64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP30]], align 4
+; I64-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP31]], align 4
+; I64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP32]], align 4
+; I64-NEXT: [[TMP49:%.*]] = load ptr, ptr [[TMP33]], align 4
+; I64-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP34]], align 4
+; I64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP35]], align 4
+; I64-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP36]], align 4
+; I64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP37]], align 4
+; I64-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP38]], align 4
+; I64-NEXT: [[TMP55:%.*]] = load ptr, ptr [[TMP39]], align 4
+; I64-NEXT: [[CONV:%.*]] = extractelement <4 x double> [[TMP20]], i32 0
+; I64-NEXT: store double [[CONV]], ptr [[TMP0]], align 4
+; I64-NEXT: [[TMP57:%.*]] = extractelement <4 x double> [[TMP20]], i32 1
+; I64-NEXT: store double [[TMP57]], ptr [[TMP41]], align 4
+; I64-NEXT: [[TMP58:%.*]] = extractelement <4 x double> [[TMP20]], i32 2
+; I64-NEXT: store double [[TMP58]], ptr [[TMP42]], align 4
+; I64-NEXT: [[TMP59:%.*]] = extractelement <4 x double> [[TMP20]], i32 3
+; I64-NEXT: store double [[TMP59]], ptr [[TMP43]], align 4
+; I64-NEXT: [[TMP60:%.*]] = extractelement <4 x double> [[TMP21]], i32 0
+; I64-NEXT: store double [[TMP60]], ptr [[TMP44]], align 4
+; I64-NEXT: [[TMP61:%.*]] = extractelement <4 x double> [[TMP21]], i32 1
+; I64-NEXT: store double [[TMP61]], ptr [[TMP45]], align 4
+; I64-NEXT: [[TMP62:%.*]] = extractelement <4 x double> [[TMP21]], i32 2
+; I64-NEXT: store double [[TMP62]], ptr [[TMP46]], align 4
+; I64-NEXT: [[TMP63:%.*]] = extractelement <4 x double> [[TMP21]], i32 3
+; I64-NEXT: store double [[TMP63]], ptr [[TMP47]], align 4
+; I64-NEXT: [[TMP64:%.*]] = extractelement <4 x double> [[TMP22]], i32 0
+; I64-NEXT: store double [[TMP64]], ptr [[TMP48]], align 4
+; I64-NEXT: [[TMP65:%.*]] = extractelement <4 x double> [[TMP22]], i32 1
+; I64-NEXT: store double [[TMP65]], ptr [[TMP49]], align 4
+; I64-NEXT: [[TMP66:%.*]] = extractelement <4 x double> [[TMP22]], i32 2
+; I64-NEXT: store double [[TMP66]], ptr [[TMP50]], align 4
+; I64-NEXT: [[TMP67:%.*]] = extractelement <4 x double> [[TMP22]], i32 3
+; I64-NEXT: store double [[TMP67]], ptr [[TMP51]], align 4
+; I64-NEXT: [[TMP68:%.*]] = extractelement <4 x double> [[TMP23]], i32 0
+; I64-NEXT: store double [[TMP68]], ptr [[TMP52]], align 4
+; I64-NEXT: [[TMP69:%.*]] = extractelement <4 x double> [[TMP23]], i32 1
+; I64-NEXT: store double [[TMP69]], ptr [[TMP53]], align 4
+; I64-NEXT: [[TMP70:%.*]] = extractelement <4 x double> [[TMP23]], i32 2
+; I64-NEXT: store double [[TMP70]], ptr [[TMP54]], align 4
+; I64-NEXT: [[TMP71:%.*]] = extractelement <4 x double> [[TMP23]], i32 3
+; I64-NEXT: store double [[TMP71]], ptr [[TMP55]], align 4
+; I64-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
+; I64-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4)
+; I64-NEXT: [[TMP72:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; I64-NEXT: br i1 [[TMP72]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; I64: [[MIDDLE_BLOCK]]:
+; I64-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; I64: [[VEC_EPILOG_ITER_CHECK]]:
+; I64-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP3]], 4
+; I64-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
+; I64: [[VEC_EPILOG_PH]]:
+; I64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; I64-NEXT: [[N_MOD_VF2:%.*]] = urem i32 [[TMP4]], 4
+; I64-NEXT: [[TMP73:%.*]] = icmp eq i32 [[N_MOD_VF2]], 0
+; I64-NEXT: [[TMP74:%.*]] = select i1 [[TMP73]], i32 4, i32 [[N_MOD_VF2]]
+; I64-NEXT: [[N_VEC3:%.*]] = sub i32 [[TMP4]], [[TMP74]]
+; I64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[BC_RESUME_VAL]], i64 0
+; I64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; I64-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
+; I64-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; I64: [[VEC_EPILOG_VECTOR_BODY]]:
+; I64-NEXT: [[INDEX4:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; I64-NEXT: [[VEC_IND5:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; I64-NEXT: [[TMP75:%.*]] = add i32 [[INDEX4]], 0
+; I64-NEXT: [[TMP76:%.*]] = add i32 [[INDEX4]], 1
+; I64-NEXT: [[TMP77:%.*]] = add i32 [[INDEX4]], 2
+; I64-NEXT: [[TMP78:%.*]] = add i32 [[INDEX4]], 3
+; I64-NEXT: [[TMP79:%.*]] = uitofp <4 x i32> [[VEC_IND5]] to <4 x double>
+; I64-NEXT: [[TMP80:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP75]]
+; I64-NEXT: [[TMP81:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP76]]
+; I64-NEXT: [[TMP82:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP77]]
+; I64-NEXT: [[TMP83:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP78]]
+; I64-NEXT: [[TMP84:%.*]] = load ptr, ptr [[TMP80]], align 4
+; I64-NEXT: [[TMP85:%.*]] = load ptr, ptr [[TMP81]], align 4
+; I64-NEXT: [[TMP86:%.*]] = load ptr, ptr [[TMP82]], align 4
+; I64-NEXT: [[TMP87:%.*]] = load ptr, ptr [[TMP83]], align 4
+; I64-NEXT: [[TMP88:%.*]] = extractelement <4 x double> [[TMP79]], i32 0
+; I64-NEXT: store double [[TMP88]], ptr [[TMP84]], align 4
+; I64-NEXT: [[TMP89:%.*]] = extractelement <4 x double> [[TMP79]], i32 1
+; I64-NEXT: store double [[TMP89]], ptr [[TMP85]], align 4
+; I64-NEXT: [[TMP90:%.*]] = extractelement <4 x double> [[TMP79]], i32 2
+; I64-NEXT: store double [[TMP90]], ptr [[TMP86]], align 4
+; I64-NEXT: [[TMP91:%.*]] = extractelement <4 x double> [[TMP79]], i32 3
+; I64-NEXT: store double [[TMP91]], ptr [[TMP87]], align 4
+; I64-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX4]], 4
+; I64-NEXT: [[VEC_IND_NEXT7]] = add <4 x i32> [[VEC_IND5]], splat (i32 4)
+; I64-NEXT: [[TMP92:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC3]]
+; I64-NEXT: br i1 [[TMP92]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; I64: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; I64-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]]
+; I64: [[VEC_EPILOG_SCALAR_PH]]:
+;
+; I32-LABEL: define void @test_store_initially_interleave(
+; I32-SAME: i32 [[N:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+; I32-NEXT: [[ENTRY:.*:]]
+; I32-NEXT: [[TMP0:%.*]] = add i32 [[N]], 1
+; I32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
+; I32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; I32: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; I32-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i32 [[TMP0]], 16
+; I32-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; I32: [[VECTOR_PH]]:
+; I32-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16
+; I32-NEXT: [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
+; I32-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 16, i32 [[N_MOD_VF]]
+; I32-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
+; I32-NEXT: br label %[[VECTOR_BODY:.*]]
+; I32: [[VECTOR_BODY]]:
+; I32-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I32-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I32-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
+; I32-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i32> [[STEP_ADD]], splat (i32 4)
+; I32-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i32> [[STEP_ADD_2]], splat (i32 4)
+; I32-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
+; I32-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
+; I32-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 2
+; I32-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 3
+; I32-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 4
+; I32-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 5
+; I32-NEXT: [[TMP9:%.*]] = add i32 [[INDEX]], 6
+; I32-NEXT: [[TMP10:%.*]] = add i32 [[INDEX]], 7
+; I32-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 8
+; I32-NEXT: [[TMP12:%.*]] = add i32 [[INDEX]], 9
+; I32-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 10
+; I32-NEXT: [[TMP14:%.*]] = add i32 [[INDEX]], 11
+; I32-NEXT: [[TMP40:%.*]] = add i32 [[INDEX]], 12
+; I32-NEXT: [[TMP41:%.*]] = add i32 [[INDEX]], 13
+; I32-NEXT: [[TMP42:%.*]] = add i32 [[INDEX]], 14
+; I32-NEXT: [[TMP43:%.*]] = add i32 [[INDEX]], 15
+; I32-NEXT: [[TMP44:%.*]] = uitofp <4 x i32> [[VEC_IND]] to <4 x double>
+; I32-NEXT: [[TMP45:%.*]] = uitofp <4 x i32> [[STEP_ADD]] to <4 x double>
+; I32-NEXT: [[TMP46:%.*]] = uitofp <4 x i32> [[STEP_ADD_2]] to <4 x double>
+; I32-NEXT: [[TMP55:%.*]] = uitofp <4 x i32> [[STEP_ADD_3]] to <4 x double>
+; I32-NEXT: [[TMP15:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP3]]
+; I32-NEXT: [[TMP16:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP4]]
+; I32-NEXT: [[TMP17:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP5]]
+; I32-NEXT: [[TMP18:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP6]]
+; I32-NEXT: [[TMP19:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP7]]
+; I32-NEXT: [[TMP20:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP8]]
+; I32-NEXT: [[TMP21:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP9]]
+; I32-NEXT: [[TMP22:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP10]]
+; I32-NEXT: [[TMP56:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP11]]
+; I32-NEXT: [[TMP57:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP12]]
+; I32-NEXT: [[TMP58:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP13]]
+; I32-NEXT: [[TMP59:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP14]]
+; I32-NEXT: [[TMP60:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP40]]
+; I32-NEXT: [[TMP61:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP41]]
+; I32-NEXT: [[TMP62:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP42]]
+; I32-NEXT: [[TMP71:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP43]]
+; I32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP15]], align 4
+; I32-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP16]], align 4
+; I32-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP17]], align 4
+; I32-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP18]], align 4
+; I32-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP19]], align 4
+; I32-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP20]], align 4
+; I32-NEXT: [[TMP29:%.*]] = load ptr, ptr [[TMP21]], align 4
+; I32-NEXT: [[TMP30:%.*]] = load ptr, ptr [[TMP22]], align 4
+; I32-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP56]], align 4
+; I32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP57]], align 4
+; I32-NEXT: [[TMP49:%.*]] = load ptr, ptr [[TMP58]], align 4
+; I32-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP59]], align 4
+; I32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP60]], align 4
+; I32-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP61]], align 4
+; I32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP62]], align 4
+; I32-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP71]], align 4
+; I32-NEXT: [[TMP31:%.*]] = extractelement <4 x double> [[TMP44]], i32 0
+; I32-NEXT: store double [[TMP31]], ptr [[TMP23]], align 4
+; I32-NEXT: [[TMP32:%.*]] = extractelement <4 x double> [[TMP44]], i32 1
+; I32-NEXT: store double [[TMP32]], ptr [[TMP24]], align 4
+; I32-NEXT: [[TMP33:%.*]] = extractelement <4 x double> [[TMP44]], i32 2
+; I32-NEXT: store double [[TMP33]], ptr [[TMP25]], align 4
+; I32-NEXT: [[TMP34:%.*]] = extractelement <4 x double> [[TMP44]], i32 3
+; I32-NEXT: store double [[TMP34]], ptr [[TMP26]], align 4
+; I32-NEXT: [[TMP35:%.*]] = extractelement <4 x double> [[TMP45]], i32 0
+; I32-NEXT: store double [[TMP35]], ptr [[TMP27]], align 4
+; I32-NEXT: [[TMP36:%.*]] = extractelement <4 x double> [[TMP45]], i32 1
+; I32-NEXT: store double [[TMP36]], ptr [[TMP28]], align 4
+; I32-NEXT: [[TMP37:%.*]] = extractelement <4 x double> [[TMP45]], i32 2
+; I32-NEXT: store double [[TMP37]], ptr [[TMP29]], align 4
+; I32-NEXT: [[TMP38:%.*]] = extractelement <4 x double> [[TMP45]], i32 3
+; I32-NEXT: store double [[TMP38]], ptr [[TMP30]], align 4
+; I32-NEXT: [[TMP63:%.*]] = extractelement <4 x double> [[TMP46]], i32 0
+; I32-NEXT: store double [[TMP63]], ptr [[TMP47]], align 4
+; I32-NEXT: [[TMP64:%.*]] = extractelement <4 x double> [[TMP46]], i32 1
+; I32-NEXT: store double [[TMP64]], ptr [[TMP48]], align 4
+; I32-NEXT: [[TMP65:%.*]] = extractelement <4 x double> [[TMP46]], i32 2
+; I32-NEXT: store double [[TMP65]], ptr [[TMP49]], align 4
+; I32-NEXT: [[TMP66:%.*]] = extractelement <4 x double> [[TMP46]], i32 3
+; I32-NEXT: store double [[TMP66]], ptr [[TMP50]], align 4
+; I32-NEXT: [[TMP67:%.*]] = extractelement <4 x double> [[TMP55]], i32 0
+; I32-NEXT: store double [[TMP67]], ptr [[TMP51]], align 4
+; I32-NEXT: [[TMP68:%.*]] = extractelement <4 x double> [[TMP55]], i32 1
+; I32-NEXT: store double [[TMP68]], ptr [[TMP52]], align 4
+; I32-NEXT: [[TMP69:%.*]] = extractelement <4 x double> [[TMP55]], i32 2
+; I32-NEXT: store double [[TMP69]], ptr [[TMP53]], align 4
+; I32-NEXT: [[TMP70:%.*]] = extractelement <4 x double> [[TMP55]], i32 3
+; I32-NEXT: store double [[TMP70]], ptr [[TMP54]], align 4
+; I32-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
+; I32-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4)
+; I32-NEXT: [[TMP39:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; I32-NEXT: br i1 [[TMP39]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; I32: [[MIDDLE_BLOCK]]:
+; I32-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; I32: [[VEC_EPILOG_ITER_CHECK]]:
+; I32-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 4
+; I32-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
+; I32: [[VEC_EPILOG_PH]]:
+; I32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; I32-NEXT: [[N_MOD_VF2:%.*]] = urem i32 [[TMP0]], 4
+; I32-NEXT: [[TMP72:%.*]] = icmp eq i32 [[N_MOD_VF2]], 0
+; I32-NEXT: [[TMP73:%.*]] = select i1 [[TMP72]], i32 4, i32 [[N_MOD_VF2]]
+; I32-NEXT: [[N_VEC3:%.*]] = sub i32 [[TMP0]], [[TMP73]]
+; I32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[BC_RESUME_VAL]], i64 0
+; I32-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; I32-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
+; I32-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; I32: [[VEC_EPILOG_VECTOR_BODY]]:
+; I32-NEXT: [[INDEX4:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; I32-NEXT: [[VEC_IND5:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; I32-NEXT: [[TMP74:%.*]] = add i32 [[INDEX4]], 0
+; I32-NEXT: [[TMP75:%.*]] = add i32 [[INDEX4]], 1
+; I32-NEXT: [[TMP76:%.*]] = add i32 [[INDEX4]], 2
+; I32-NEXT: [[TMP77:%.*]] = add i32 [[INDEX4]], 3
+; I32-NEXT: [[TMP78:%.*]] = uitofp <4 x i32> [[VEC_IND5]] to <4 x double>
+; I32-NEXT: [[TMP79:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP74]]
+; I32-NEXT: [[TMP80:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP75]]
+; I32-NEXT: [[TMP81:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP76]]
+; I32-NEXT: [[TMP82:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP77]]
+; I32-NEXT: [[TMP83:%.*]] = load ptr, ptr [[TMP79]], align 4
+; I32-NEXT: [[TMP84:%.*]] = load ptr, ptr [[TMP80]], align 4
+; I32-NEXT: [[TMP85:%.*]] = load ptr, ptr [[TMP81]], align 4
+; I32-NEXT: [[TMP86:%.*]] = load ptr, ptr [[TMP82]], align 4
+; I32-NEXT: [[TMP87:%.*]] = extractelement <4 x double> [[TMP78]], i32 0
+; I32-NEXT: store double [[TMP87]], ptr [[TMP83]], align 4
+; I32-NEXT: [[TMP88:%.*]] = extractelement <4 x double> [[TMP78]], i32 1
+; I32-NEXT: store double [[TMP88]], ptr [[TMP84]], align 4
+; I32-NEXT: [[TMP89:%.*]] = extractelement <4 x double> [[TMP78]], i32 2
+; I32-NEXT: store double [[TMP89]], ptr [[TMP85]], align 4
+; I32-NEXT: [[TMP90:%.*]] = extractelement <4 x double> [[TMP78]], i32 3
+; I32-NEXT: store double [[TMP90]], ptr [[TMP86]], align 4
+; I32-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX4]], 4
+; I32-NEXT: [[VEC_IND_NEXT7]] = add <4 x i32> [[VEC_IND5]], splat (i32 4)
+; I32-NEXT: [[TMP91:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC3]]
+; I32-NEXT: br i1 [[TMP91]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; I32: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; I32-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]]
+; I32: [[VEC_EPILOG_SCALAR_PH]]:
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
+ %conv = uitofp i32 %iv to double
+ %add.ptr.i = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 %iv
+ %0 = load ptr, ptr %add.ptr.i, align 4
+ store double %conv, ptr %0, align 4
+ %inc = add i32 %iv, 1
+ %ec = icmp eq i32 %iv, %n
+ br i1 %ec, label %exit, label %loop
+
+exit: ; preds = %loop
+ ret void
+}
+
+define void @test_store_loaded_value(ptr noalias %src, ptr noalias %dst, i32 %n) #0 {
+; I64-LABEL: define void @test_store_loaded_value(
+; I64-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; I64-NEXT: [[BB:.*:]]
+; I64-NEXT: [[PRE:%.*]] = icmp slt i32 [[N]], 1
+; I64-NEXT: br i1 [[PRE]], [[EXIT:label %.*]], label %[[PH:.*]]
+; I64: [[PH]]:
+; I64-NEXT: [[N_EXT:%.*]] = zext i32 [[N]] to i64
+; I64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_EXT]], 4
+; I64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; I64: [[VECTOR_PH]]:
+; I64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_EXT]], 4
+; I64-NEXT: [[N_VEC:%.*]] = sub i64 [[N_EXT]], [[N_MOD_VF]]
+; I64-NEXT: br label %[[VECTOR_BODY:.*]]
+; I64: [[VECTOR_BODY]]:
+; I64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I64-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; I64-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; I64-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; I64-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; I64-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP0]]
+; I64-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]]
+; I64-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]]
+; I64-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]]
+; I64-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP4]], align 8
+; I64-NEXT: [[TMP9:%.*]] = load double, ptr [[TMP5]], align 8
+; I64-NEXT: [[TMP10:%.*]] = load double, ptr [[TMP6]], align 8
+; I64-NEXT: [[TMP11:%.*]] = load double, ptr [[TMP7]], align 8
+; I64-NEXT: [[TMP12:%.*]] = shl i64 [[TMP0]], 1
+; I64-NEXT: [[TMP13:%.*]] = shl i64 [[TMP1]], 1
+; I64-NEXT: [[TMP14:%.*]] = shl i64 [[TMP2]], 1
+; I64-NEXT: [[TMP15:%.*]] = shl i64 [[TMP3]], 1
+; I64-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]]
+; I64-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]]
+; I64-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP14]]
+; I64-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP15]]
+; I64-NEXT: store double [[TMP8]], ptr [[TMP16]], align 8
+; I64-NEXT: store double [[TMP9]], ptr [[TMP17]], align 8
+; I64-NEXT: store double [[TMP10]], ptr [[TMP18]], align 8
+; I64-NEXT: store double [[TMP11]], ptr [[TMP19]], align 8
+; I64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; I64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; I64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; I64: [[MIDDLE_BLOCK]]:
+; I64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]]
+; I64-NEXT: br i1 [[CMP_N]], [[EXIT_LOOPEXIT:label %.*]], label %[[SCALAR_PH]]
+; I64: [[SCALAR_PH]]:
+;
+; I32-LABEL: define void @test_store_loaded_value(
+; I32-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; I32-NEXT: [[BB:.*:]]
+; I32-NEXT: [[PRE:%.*]] = icmp slt i32 [[N]], 1
+; I32-NEXT: br i1 [[PRE]], [[EXIT:label %.*]], label %[[PH:.*]]
+; I32: [[PH]]:
+; I32-NEXT: [[N_EXT:%.*]] = zext i32 [[N]] to i64
+; I32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_EXT]], 4
+; I32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; I32: [[VECTOR_PH]]:
+; I32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_EXT]], 4
+; I32-NEXT: [[N_VEC:%.*]] = sub i64 [[N_EXT]], [[N_MOD_VF]]
+; I32-NEXT: br label %[[VECTOR_BODY:.*]]
+; I32: [[VECTOR_BODY]]:
+; I32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I32-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; I32-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; I32-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; I32-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; I32-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP0]]
+; I32-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]]
+; I32-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]]
+; I32-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]]
+; I32-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP4]], align 8
+; I32-NEXT: [[TMP9:%.*]] = load double, ptr [[TMP5]], align 8
+; I32-NEXT: [[TMP10:%.*]] = load double, ptr [[TMP6]], align 8
+; I32-NEXT: [[TMP11:%.*]] = load double, ptr [[TMP7]], align 8
+; I32-NEXT: [[TMP12:%.*]] = shl i64 [[TMP0]], 1
+; I32-NEXT: [[TMP13:%.*]] = shl i64 [[TMP1]], 1
+; I32-NEXT: [[TMP14:%.*]] = shl i64 [[TMP2]], 1
+; I32-NEXT: [[TMP15:%.*]] = shl i64 [[TMP3]], 1
+; I32-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]]
+; I32-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]]
+; I32-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP14]]
+; I32-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP15]]
+; I32-NEXT: store double [[TMP8]], ptr [[TMP16]], align 8
+; I32-NEXT: store double [[TMP9]], ptr [[TMP17]], align 8
+; I32-NEXT: store double [[TMP10]], ptr [[TMP18]], align 8
+; I32-NEXT: store double [[TMP11]], ptr [[TMP19]], align 8
+; I32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; I32-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; I32-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; I32: [[MIDDLE_BLOCK]]:
+; I32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]]
+; I32-NEXT: br i1 [[CMP_N]], [[EXIT_LOOPEXIT:label %.*]], label %[[SCALAR_PH]]
+; I32: [[SCALAR_PH]]:
+;
+bb:
+ %pre = icmp slt i32 %n, 1
+ br i1 %pre, label %exit, label %ph
+
+ph:
+ %n.ext = zext i32 %n to i64
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+ %iv.next = add i64 %iv, 1
+ %gep.src = getelementptr i8, ptr %src, i64 %iv
+ %l = load double, ptr %gep.src, align 8
+ %sext = shl i64 %iv, 1
+ %gep.dst = getelementptr i8, ptr %dst, i64 %sext
+ store double %l, ptr %gep.dst, align 8
+ %ec = icmp eq i64 %iv.next, %n.ext
+ br i1 %ec, label %exit, label %loop, !llvm.loop !0
+
+exit:
+ ret void
+}
+
+attributes #0 = { "target-cpu"="znver2" }
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.vectorize.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
index b713a39..272b62b 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
@@ -33,8 +33,6 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT_1_LOOPEXIT1:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_3:.*]]
; CHECK: [[LOOP_2_PREHEADER]]:
; CHECK-NEXT: br label %[[LOOP_2:.*]]
; CHECK: [[LOOP_2]]:
@@ -48,13 +46,6 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr
; CHECK-NEXT: store i16 0, ptr [[GEP_DST]], align 2
; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i64 [[IV_2]], [[IV_1_LCSSA]]
; CHECK-NEXT: br i1 [[EC_2]], label %[[LOOP_2]], label %[[EXIT_1_LOOPEXIT:.*]]
-; CHECK: [[LOOP_3]]:
-; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_4]]
-; CHECK-NEXT: store i8 0, ptr [[GEP_DST_2]], align 1
-; CHECK-NEXT: [[IV_4_NEXT]] = add i64 [[IV_4]], 1
-; CHECK-NEXT: [[EC_3:%.*]] = icmp ult i64 [[IV_4_NEXT]], [[IV_1_LCSSA]]
-; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP_3]], label %[[EXIT_1_LOOPEXIT1]]
; CHECK: [[EXIT_1_LOOPEXIT]]:
; CHECK-NEXT: br label %[[EXIT_1:.*]]
; CHECK: [[EXIT_1_LOOPEXIT1]]:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
index f877e1b..e99ffda 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -39,12 +39,8 @@ define void @example1() optsize {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[TMP7:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[TMP6:%.*]]
; CHECK: 6:
-; CHECK-NEXT: br i1 poison, label [[TMP7]], label [[TMP6]]
-; CHECK: 7:
; CHECK-NEXT: ret void
;
br label %1
@@ -123,8 +119,6 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[DOT_PREHEADER_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[DOTLR_PH5:%.*]]
; CHECK: ..preheader_crit_edge:
; CHECK-NEXT: [[PHITMP:%.*]] = zext nneg i32 [[N]] to i64
; CHECK-NEXT: br label [[DOTPREHEADER]]
@@ -134,7 +128,7 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: br i1 [[TMP16]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH_PREHEADER:%.*]]
; CHECK: .lr.ph.preheader:
; CHECK-NEXT: br label [[VECTOR_PH8:%.*]]
-; CHECK: vector.ph8:
+; CHECK: vector.ph7:
; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT: [[N_RND_UP10:%.*]] = add nuw nsw i64 [[TMP17]], 3
; CHECK-NEXT: [[N_VEC12:%.*]] = and i64 [[N_RND_UP10]], 8589934588
@@ -142,7 +136,7 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT19:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_114]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT20:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT19]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY13:%.*]]
-; CHECK: vector.body15:
+; CHECK: vector.body14:
; CHECK-NEXT: [[INDEX16:%.*]] = phi i64 [ 0, [[VECTOR_PH8]] ], [ [[INDEX_NEXT29:%.*]], [[PRED_STORE_CONTINUE26:%.*]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[I_0_LCSSA]], [[INDEX16]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT17:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX16]], i64 0
@@ -151,7 +145,7 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP18:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT20]]
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP18]], i64 0
; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20:%.*]]
-; CHECK: pred.store.if19:
+; CHECK: pred.store.if18:
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[OFFSET_IDX]]
@@ -160,10 +154,10 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP25:%.*]] = and i32 [[TMP23]], [[TMP21]]
; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE20]]
-; CHECK: pred.store.continue20:
+; CHECK: pred.store.continue19:
; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP18]], i64 1
; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_STORE_IF21:%.*]], label [[PRED_STORE_CONTINUE22:%.*]]
-; CHECK: pred.store.if21:
+; CHECK: pred.store.if20:
; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], 1
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP27]]
; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4
@@ -173,10 +167,10 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP33:%.*]] = and i32 [[TMP31]], [[TMP29]]
; CHECK-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE22]]
-; CHECK: pred.store.continue22:
+; CHECK: pred.store.continue21:
; CHECK-NEXT: [[TMP34:%.*]] = extractelement <4 x i1> [[TMP18]], i64 2
; CHECK-NEXT: br i1 [[TMP34]], label [[PRED_STORE_IF23:%.*]], label [[PRED_STORE_CONTINUE24:%.*]]
-; CHECK: pred.store.if23:
+; CHECK: pred.store.if22:
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[OFFSET_IDX]], 2
; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP35]]
; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4
@@ -186,10 +180,10 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP41:%.*]] = and i32 [[TMP39]], [[TMP37]]
; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE24]]
-; CHECK: pred.store.continue24:
+; CHECK: pred.store.continue23:
; CHECK-NEXT: [[TMP42:%.*]] = extractelement <4 x i1> [[TMP18]], i64 3
; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_STORE_IF25:%.*]], label [[PRED_STORE_CONTINUE26]]
-; CHECK: pred.store.if25:
+; CHECK: pred.store.if24:
; CHECK-NEXT: [[TMP43:%.*]] = add i64 [[OFFSET_IDX]], 3
; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP43]]
; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP44]], align 4
@@ -199,18 +193,12 @@ define void @example2(i32 %n, i32 %x) optsize {
; CHECK-NEXT: [[TMP49:%.*]] = and i32 [[TMP47]], [[TMP45]]
; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE26]]
-; CHECK: pred.store.continue26:
+; CHECK: pred.store.continue25:
; CHECK-NEXT: [[INDEX_NEXT29]] = add nuw i64 [[INDEX16]], 4
; CHECK-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT29]], [[N_VEC12]]
-; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK28:%.*]], label [[VECTOR_BODY13]], !llvm.loop [[LOOP4:![0-9]+]]
-; CHECK: middle.block28:
-; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: scalar.ph7:
+; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK27:%.*]], label [[VECTOR_BODY13]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: middle.block27:
; CHECK-NEXT: br label [[DOTLR_PH1:%.*]]
-; CHECK: .lr.ph5:
-; CHECK-NEXT: br i1 poison, label [[DOT_PREHEADER_CRIT_EDGE]], label [[DOTLR_PH5]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOTLR_PH]], label [[DOTLR_PH1]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: br label [[DOT_CRIT_EDGE]]
; CHECK: ._crit_edge:
@@ -328,11 +316,7 @@ define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: br label [[DOT_CRIT_EDGE]]
; CHECK: ._crit_edge:
@@ -418,12 +402,8 @@ define void @example23b(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[TMP5:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[TMP4:%.*]]
; CHECK: 4:
-; CHECK-NEXT: br i1 poison, label [[TMP5]], label [[TMP4]]
-; CHECK: 5:
; CHECK-NEXT: ret void
;
br label %1
@@ -516,12 +496,8 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[TMP26:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[TMP25:%.*]]
; CHECK: 25:
-; CHECK-NEXT: br i1 poison, label [[TMP26]], label [[TMP25]]
-; CHECK: 26:
; CHECK-NEXT: ret void
;
br label %1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll
index 931c927..15e2678 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll
@@ -15,17 +15,17 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u
; CHECK-NEXT: [[ITER_CHECK:.*]]:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[J]] to i64
-; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]]
-; CHECK: [[VECTOR_PH1]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
@@ -184,15 +184,15 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u
; CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX8]])
; CHECK-NEXT: br i1 false, label %[[FOR_COND_CLEANUP:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
-; CHECK-NEXT: br i1 false, label %[[SCALAR_PH]], label %[[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]]
; CHECK: [[VEC_EPILOG_PH]]:
-; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ]
+; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[TMP171:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP172:%.*]] = add i64 [[INDEX9]], 0
; CHECK-NEXT: [[TMP173:%.*]] = add i64 [[INDEX9]], 1
; CHECK-NEXT: [[TMP174:%.*]] = add i64 [[INDEX9]], 2
@@ -216,20 +216,20 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u
; CHECK-NEXT: [[TMP168]] = add <4 x i32> [[TMP167]], [[TMP166]]
; CHECK-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX9]], 4
; CHECK-NEXT: [[TMP169:%.*]] = icmp eq i64 [[INDEX_NEXT12]], 100
-; CHECK-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP170:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP168]])
-; CHECK-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[SCALAR_PH]]
-; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
; CHECK-NEXT: [[BC_MERGE_RDX13:%.*]] = phi i32 [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
-; CHECK-NEXT: br label %[[FOR_BODY1:.*]]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP]]:
-; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY1]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[ADD7_LCSSA]]
-; CHECK: [[FOR_BODY1]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY1]] ]
-; CHECK-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY1]] ]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP150:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[INT_TBAA1]]
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[INDVARS_IV]], i64 [[IDXPROM5]]
@@ -239,24 +239,24 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u
; CHECK-NEXT: [[ADD7]] = add i32 [[ADD]], [[MUL]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
;
; MAX-BW-LABEL: define i32 @matrix_row_col(
; MAX-BW-SAME: ptr readonly captures(none) [[DATA:%.*]], i32 [[I:%.*]], i32 [[J:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; MAX-BW-NEXT: [[ITER_CHECK:.*]]:
; MAX-BW-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64
; MAX-BW-NEXT: [[IDXPROM5:%.*]] = sext i32 [[J]] to i64
-; MAX-BW-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; MAX-BW: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
; MAX-BW: [[VECTOR_PH]]:
-; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]]
-; MAX-BW: [[VECTOR_PH1]]:
; MAX-BW-NEXT: br label %[[VECTOR_BODY:.*]]
; MAX-BW: [[VECTOR_BODY]]:
-; MAX-BW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; MAX-BW-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ]
-; MAX-BW-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ]
-; MAX-BW-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ]
-; MAX-BW-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ]
+; MAX-BW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; MAX-BW-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ]
+; MAX-BW-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ]
+; MAX-BW-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ]
+; MAX-BW-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ]
; MAX-BW-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; MAX-BW-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
; MAX-BW-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
@@ -415,15 +415,15 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u
; MAX-BW-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX8]])
; MAX-BW-NEXT: br i1 false, label %[[FOR_COND_CLEANUP:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
; MAX-BW: [[VEC_EPILOG_ITER_CHECK]]:
-; MAX-BW-NEXT: br i1 false, label %[[SCALAR_PH]], label %[[VEC_EPILOG_PH]]
+; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]]
; MAX-BW: [[VEC_EPILOG_PH]]:
-; MAX-BW-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ]
-; MAX-BW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ]
+; MAX-BW-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; MAX-BW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; MAX-BW-NEXT: [[TMP171:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0
-; MAX-BW-NEXT: br label %[[FOR_BODY:.*]]
-; MAX-BW: [[FOR_BODY]]:
-; MAX-BW-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[FOR_BODY]] ]
-; MAX-BW-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[FOR_BODY]] ]
+; MAX-BW-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; MAX-BW: [[VEC_EPILOG_VECTOR_BODY]]:
+; MAX-BW-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; MAX-BW-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
; MAX-BW-NEXT: [[TMP172:%.*]] = add i64 [[INDEX9]], 0
; MAX-BW-NEXT: [[TMP173:%.*]] = add i64 [[INDEX9]], 1
; MAX-BW-NEXT: [[TMP174:%.*]] = add i64 [[INDEX9]], 2
@@ -447,20 +447,20 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u
; MAX-BW-NEXT: [[TMP168]] = add <4 x i32> [[TMP167]], [[TMP166]]
; MAX-BW-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX9]], 4
; MAX-BW-NEXT: [[TMP169:%.*]] = icmp eq i64 [[INDEX_NEXT12]], 100
-; MAX-BW-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; MAX-BW-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; MAX-BW: [[VEC_EPILOG_MIDDLE_BLOCK]]:
; MAX-BW-NEXT: [[TMP170:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP168]])
-; MAX-BW-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[SCALAR_PH]]
-; MAX-BW: [[SCALAR_PH]]:
+; MAX-BW-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[VEC_EPILOG_SCALAR_PH]]
+; MAX-BW: [[VEC_EPILOG_SCALAR_PH]]:
; MAX-BW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
; MAX-BW-NEXT: [[BC_MERGE_RDX13:%.*]] = phi i32 [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
-; MAX-BW-NEXT: br label %[[FOR_BODY1:.*]]
+; MAX-BW-NEXT: br label %[[FOR_BODY:.*]]
; MAX-BW: [[FOR_COND_CLEANUP]]:
-; MAX-BW-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY1]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; MAX-BW-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
; MAX-BW-NEXT: ret i32 [[ADD7_LCSSA]]
-; MAX-BW: [[FOR_BODY1]]:
-; MAX-BW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY1]] ]
-; MAX-BW-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY1]] ]
+; MAX-BW: [[FOR_BODY]]:
+; MAX-BW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; MAX-BW-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY]] ]
; MAX-BW-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[INDVARS_IV]]
; MAX-BW-NEXT: [[TMP150:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[INT_TBAA1]]
; MAX-BW-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[INDVARS_IV]], i64 [[IDXPROM5]]
@@ -470,7 +470,7 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u
; MAX-BW-NEXT: [[ADD7]] = add i32 [[ADD]], [[MUL]]
; MAX-BW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; MAX-BW-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
-; MAX-BW-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]]
+; MAX-BW-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
;
entry:
%idxprom = sext i32 %i to i64
@@ -555,26 +555,9 @@ define void @test(ptr %A, ptr noalias %B) #0 {
; CHECK-NEXT: store i8 [[TMP35]], ptr [[TMP27]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
-; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[IV_0:%.*]] = add nuw nsw i64 [[IV]], 0
-; CHECK-NEXT: [[IV_1:%.*]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IN0:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_0]]
-; CHECK-NEXT: [[IN1:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_1]]
-; CHECK-NEXT: [[V0:%.*]] = load i32, ptr [[IN0]], align 4
-; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[IN1]], align 4
-; CHECK-NEXT: [[REDUCE_ADD_0:%.*]] = add i32 [[V0]], [[V1]]
-; CHECK-NEXT: [[REDUCE_ADD_0_NARROW:%.*]] = trunc i32 [[REDUCE_ADD_0]] to i8
-; CHECK-NEXT: [[OUT:%.*]] = getelementptr inbounds [1024 x i8], ptr [[B]], i64 0, i64 [[IV_0]]
-; CHECK-NEXT: store i8 [[REDUCE_ADD_0_NARROW]], ptr [[OUT]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV_0]], 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: ret void
;
@@ -675,26 +658,9 @@ define void @test(ptr %A, ptr noalias %B) #0 {
; MAX-BW-NEXT: store i8 [[TMP67]], ptr [[TMP51]], align 1
; MAX-BW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; MAX-BW-NEXT: [[TMP68:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
-; MAX-BW-NEXT: br i1 [[TMP68]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; MAX-BW-NEXT: br i1 [[TMP68]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; MAX-BW: [[MIDDLE_BLOCK]]:
; MAX-BW-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; MAX-BW: [[SCALAR_PH:.*]]:
-; MAX-BW-NEXT: br label %[[FOR_BODY:.*]]
-; MAX-BW: [[FOR_BODY]]:
-; MAX-BW-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; MAX-BW-NEXT: [[IV_0:%.*]] = add nuw nsw i64 [[IV]], 0
-; MAX-BW-NEXT: [[IV_1:%.*]] = add nuw nsw i64 [[IV]], 1
-; MAX-BW-NEXT: [[IN0:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_0]]
-; MAX-BW-NEXT: [[IN1:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_1]]
-; MAX-BW-NEXT: [[V0:%.*]] = load i32, ptr [[IN0]], align 4
-; MAX-BW-NEXT: [[V1:%.*]] = load i32, ptr [[IN1]], align 4
-; MAX-BW-NEXT: [[REDUCE_ADD_0:%.*]] = add i32 [[V0]], [[V1]]
-; MAX-BW-NEXT: [[REDUCE_ADD_0_NARROW:%.*]] = trunc i32 [[REDUCE_ADD_0]] to i8
-; MAX-BW-NEXT: [[OUT:%.*]] = getelementptr inbounds [1024 x i8], ptr [[B]], i64 0, i64 [[IV_0]]
-; MAX-BW-NEXT: store i8 [[REDUCE_ADD_0_NARROW]], ptr [[OUT]], align 1
-; MAX-BW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV_0]], 2
-; MAX-BW-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 1024
-; MAX-BW-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]]
; MAX-BW: [[FOR_COND_CLEANUP]]:
; MAX-BW-NEXT: ret void
;
@@ -745,9 +711,10 @@ attributes #0 = { "target-cpu"="core-avx2" "target-features"="+avx,+avx2,+sse,+s
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]}
; CHECK: [[META6]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]], [[META7]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]], [[META6]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META6]], [[META7]]}
+; CHECK: [[PROF8]] = !{!"branch_weights", i32 4, i32 28}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]], [[META7]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]], [[META6]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META6]], [[META7]]}
;.
; MAX-BW: [[INT_TBAA1]] = !{[[META2:![0-9]+]], [[META2]], i64 0}
; MAX-BW: [[META2]] = !{!"int", [[META3:![0-9]+]], i64 0}
@@ -756,7 +723,8 @@ attributes #0 = { "target-cpu"="core-avx2" "target-features"="+avx,+avx2,+sse,+s
; MAX-BW: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]}
; MAX-BW: [[META6]] = !{!"llvm.loop.isvectorized", i32 1}
; MAX-BW: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"}
-; MAX-BW: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]], [[META7]]}
-; MAX-BW: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]], [[META6]]}
-; MAX-BW: [[LOOP10]] = distinct !{[[LOOP10]], [[META6]], [[META7]]}
+; MAX-BW: [[PROF8]] = !{!"branch_weights", i32 4, i32 28}
+; MAX-BW: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]], [[META7]]}
+; MAX-BW: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]], [[META6]]}
+; MAX-BW: [[LOOP11]] = distinct !{[[LOOP11]], [[META6]], [[META7]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
index 669e925..7069534 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
@@ -28,23 +28,9 @@ define dso_local void @tail_folding_enabled(ptr noalias nocapture %A, ptr noalia
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 432
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
entry:
br label %for.body
@@ -89,25 +75,11 @@ define dso_local void @tail_folding_disabled(ptr noalias nocapture %A, ptr noali
; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP6]], ptr [[TMP7]], i32 4, <8 x i1> [[TMP1]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 432
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
;
entry:
br label %for.body
@@ -170,28 +142,12 @@ define i32 @reduction_i32(ptr nocapture readonly %A, ptr nocapture readonly %B,
; CHECK-NEXT: [[TMP11:%.*]] = select <8 x i1> [[TMP4]], <8 x i32> [[TMP10]], <8 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP11]])
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4
-; CHECK-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDXB]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP14]]
-; CHECK-NEXT: [[SUM_1]] = add nuw nsw i32 [[ADD]], [[SUM_0]]
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_1]], [[FOR_BODY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_1_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP13]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
index 27150cb..63f9a13 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
@@ -398,27 +398,9 @@ define i32 @test_count_bits(ptr %test_base) {
; CHECK-NEXT: [[BIN_RDX13:%.*]] = add <4 x i32> [[TMP38]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX14:%.*]] = add <4 x i32> [[TMP39]], [[BIN_RDX13]]
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX14]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[BYTE:%.*]] = udiv i64 [[IV]], 8
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[BYTE]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i8, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: [[BIT:%.*]] = urem i64 [[IV]], 8
-; CHECK-NEXT: [[BIT_TRUNC:%.*]] = trunc i64 [[BIT]] to i8
-; CHECK-NEXT: [[MASK:%.*]] = lshr i8 [[EARLYCND]], [[BIT_TRUNC]]
-; CHECK-NEXT: [[TEST:%.*]] = and i8 [[MASK]], 1
-; CHECK-NEXT: [[VAL:%.*]] = zext i8 [[TEST]] to i32
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LOOP]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP41]]
;
entry:
%alloca = alloca [4096 x i32]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
index 3ae8001..28de5c7 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
@@ -141,20 +141,7 @@ define void @vectorized1(ptr noalias nocapture %A, ptr noalias nocapture readonl
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP8]], [[TMP9]]
-; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 20
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -200,22 +187,9 @@ define void @vectorized2(ptr noalias nocapture %A, ptr noalias nocapture readonl
; CHECK-NEXT: store <8 x float> [[TMP5]], ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]]
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP7]], [[TMP8]]
-; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 16
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
index 282e9a5..1e94f83 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
@@ -38,21 +38,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]]
-; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; IF-EVL: for.cond.cleanup:
; IF-EVL-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
index 69cdd65..455fe83 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
@@ -74,22 +74,7 @@ define void @test_pr59090(ptr %l_out, ptr noalias %b) #0 {
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10008
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_MUL:%.*]] = mul nuw i64 [[IV]], 6
-; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]]
-; CHECK-NEXT: store i8 [[L]], ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]]
-; CHECK-NEXT: [[ARRAYIDX77:%.*]] = getelementptr i8, ptr [[L_OUT]], i64 [[IV_MUL]]
-; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX77]], align 1, !llvm.access.group [[ACC_GRP0]]
-; CHECK-NEXT: [[ADD_2:%.*]] = add i64 [[IV_MUL]], 2
-; CHECK-NEXT: [[ARRAYIDX97:%.*]] = getelementptr i8, ptr [[L_OUT]], i64 [[ADD_2]]
-; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX97]], align 1, !llvm.access.group [[ACC_GRP0]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 10000
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll
index bdedcca..9ea9e11 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll
@@ -48,25 +48,7 @@ define void @iv.4_used_as_vector_and_first_lane(ptr %src, ptr noalias %dst) {
; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[G_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[G_SRC]], align 8
-; CHECK-NEXT: [[IV_4:%.*]] = add nuw nsw i64 [[IV]], 4
-; CHECK-NEXT: [[C:%.*]] = icmp ule i64 [[L]], 128
-; CHECK-NEXT: br i1 [[C]], label [[LOOP_THEN:%.*]], label [[LOOP_LATCH]]
-; CHECK: loop.then:
-; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV_4]], 1
-; CHECK-NEXT: [[G_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[OR]]
-; CHECK-NEXT: store i64 [[IV_4]], ptr [[G_DST]], align 4
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 32
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -131,25 +113,7 @@ define void @iv.4_used_as_first_lane(ptr %src, ptr noalias %dst) {
; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[G_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[G_SRC]], align 8
-; CHECK-NEXT: [[IV_4:%.*]] = add nuw nsw i64 [[IV]], 4
-; CHECK-NEXT: [[C:%.*]] = icmp ule i64 [[L]], 128
-; CHECK-NEXT: br i1 [[C]], label [[LOOP_THEN:%.*]], label [[LOOP_LATCH]]
-; CHECK: loop.then:
-; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV_4]], 1
-; CHECK-NEXT: [[G_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[OR]]
-; CHECK-NEXT: store i64 [[L]], ptr [[G_DST]], align 4
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 32
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
index f9403b8..774f0db 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
@@ -134,30 +134,9 @@ define i32 @predicated_sdiv_masked_load(ptr %a, ptr %b, i32 %x, i1 %c) {
; SINK-GATHER-NEXT: br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SINK-GATHER: middle.block:
; SINK-GATHER-NEXT: [[TMP49:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP47]])
-; SINK-GATHER-NEXT: br label [[FOR_END:%.*]]
-; SINK-GATHER: scalar.ph:
-; SINK-GATHER-NEXT: br label [[FOR_BODY:%.*]]
-; SINK-GATHER: for.body:
-; SINK-GATHER-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; SINK-GATHER-NEXT: [[R:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[T7:%.*]], [[FOR_INC]] ]
-; SINK-GATHER-NEXT: [[T0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
-; SINK-GATHER-NEXT: [[T1:%.*]] = load i32, ptr [[T0]], align 4
-; SINK-GATHER-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; SINK-GATHER: if.then:
-; SINK-GATHER-NEXT: [[T2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
-; SINK-GATHER-NEXT: [[T3:%.*]] = load i32, ptr [[T2]], align 4
-; SINK-GATHER-NEXT: [[T4:%.*]] = sdiv i32 [[T3]], [[X]]
-; SINK-GATHER-NEXT: [[T5:%.*]] = add nsw i32 [[T4]], [[T1]]
-; SINK-GATHER-NEXT: br label [[FOR_INC]]
-; SINK-GATHER: for.inc:
-; SINK-GATHER-NEXT: [[T6:%.*]] = phi i32 [ [[T1]], [[FOR_BODY]] ], [ [[T5]], [[IF_THEN]] ]
-; SINK-GATHER-NEXT: [[T7]] = add i32 [[R]], [[T6]]
-; SINK-GATHER-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
-; SINK-GATHER-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 10000
-; SINK-GATHER-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]]
+; SINK-GATHER-NEXT: br label [[FOR_INC:%.*]]
; SINK-GATHER: for.end:
-; SINK-GATHER-NEXT: [[T8:%.*]] = phi i32 [ [[T7]], [[FOR_INC]] ], [ [[TMP49]], [[MIDDLE_BLOCK]] ]
-; SINK-GATHER-NEXT: ret i32 [[T8]]
+; SINK-GATHER-NEXT: ret i32 [[TMP49]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/bsd_regex.ll b/llvm/test/Transforms/LoopVectorize/bsd_regex.ll
index afdbfaa..f64255f 100644
--- a/llvm/test/Transforms/LoopVectorize/bsd_regex.ll
+++ b/llvm/test/Transforms/LoopVectorize/bsd_regex.ll
@@ -37,11 +37,7 @@ define i32 @foo(ptr nocapture %A) {
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 undef
;
diff --git a/llvm/test/Transforms/LoopVectorize/check-prof-info.ll b/llvm/test/Transforms/LoopVectorize/check-prof-info.ll
index ce9d1f2..b59ad84 100644
--- a/llvm/test/Transforms/LoopVectorize/check-prof-info.ll
+++ b/llvm/test/Transforms/LoopVectorize/check-prof-info.ll
@@ -19,12 +19,8 @@ define void @_Z3foov() {
; CHECK: vector.body:
; CHECK: br i1 [[TMP6:%.*]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: middle.block:
-; CHECK: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
-; CHECK: for.body:
-; CHECK: br i1 [[EXITCOND:%.*]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !prof [[PROF5:![0-9]+]]
;
; CHECK-MASKED-LABEL: @_Z3foov(
; CHECK-MASKED: entry:
@@ -34,12 +30,8 @@ define void @_Z3foov() {
; CHECK-MASKED: vector.body:
; CHECK-MASKED: br i1 [[TMP18:%.*]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK-MASKED: middle.block:
-; CHECK-MASKED: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK-MASKED: scalar.ph:
; CHECK-MASKED: br label [[FOR_BODY:%.*]]
; CHECK-MASKED: for.cond.cleanup:
-; CHECK-MASKED: for.body:
-; CHECK-MASKED: br i1 [[EXITCOND:%.*]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !prof [[PROF5:![0-9]+]]
;
; CHECK-SCALABLE-LABEL: @_Z3foov(
; CHECK-SCALABLE: entry:
diff --git a/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll b/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll
index bd0655d..143a0af 100644
--- a/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll
+++ b/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll
@@ -19,19 +19,6 @@ define void @test(ptr %data) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[OR_IV_1:%.*]] = or disjoint i64 [[IV]], 1
-; CHECK-NEXT: [[GEP_POSTSCALE:%.*]] = getelementptr [64 x float], ptr @postscale, i64 0, i64 [[OR_IV_1]]
-; CHECK-NEXT: [[LOAD_POSTSCALE:%.*]] = load float, ptr [[GEP_POSTSCALE]], align 4, !tbaa [[FLOAT_TBAA0]]
-; CHECK-NEXT: [[LRINT:%.*]] = tail call i64 @llvm.lrint.i64.f32(float [[LOAD_POSTSCALE]])
-; CHECK-NEXT: [[LRINT_TRUNC:%.*]] = trunc i64 [[LRINT]] to i16
-; CHECK-NEXT: store i16 [[LRINT_TRUNC]], ptr [[DATA]], align 2, !tbaa [[SHORT_TBAA4]]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], 8
-; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/constantfolder.ll b/llvm/test/Transforms/LoopVectorize/constantfolder.ll
index 37f2e73..66592b0 100644
--- a/llvm/test/Transforms/LoopVectorize/constantfolder.ll
+++ b/llvm/test/Transforms/LoopVectorize/constantfolder.ll
@@ -16,20 +16,6 @@ define void @const_fold_ptradd(ptr %dst, i64 %d) {
; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[CONST_0]]
-; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -70,20 +56,6 @@ define void @const_fold_inbounds_ptradd(ptr %dst, i64 %d) {
; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[CONST_0]]
-; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -125,20 +97,6 @@ define void @const_fold_select(ptr %dst, i64 %d) {
; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[OR:%.*]] = or i64 [[D]], [[CONST_1]]
-; CHECK-NEXT: store i64 [[OR]], ptr [[DST]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -179,24 +137,6 @@ define void @const_fold_add_sub_mul_ashr_lshr(ptr %dst, i64 %d) {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[ADD:%.*]] = add i64 2, [[CONST_1]]
-; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[ADD]], [[CONST_1]]
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SUB]], [[CONST_1]]
-; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[ASHR]], 3
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 [[MUL]], [[CONST_1]]
-; CHECK-NEXT: store i64 [[LSHR]], ptr [[DST]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -241,22 +181,6 @@ define void @const_fold_and_or_xor(ptr %dst, i64 %d) {
; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[OR:%.*]] = or i64 2, [[CONST_1]]
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[OR]], [[CONST_1]]
-; CHECK-NEXT: [[XOR:%.*]] = and i64 [[AND]], [[CONST_1]]
-; CHECK-NEXT: store i64 [[XOR]], ptr [[DST]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -299,21 +223,6 @@ define void @const_fold_cmp_zext(ptr %dst, i64 %d) {
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[VAL:%.*]] = icmp ugt i64 2, [[CONST_1]]
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[VAL]] to i8
-; CHECK-NEXT: store i8 [[ZEXT]], ptr [[DST]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -355,20 +264,6 @@ define void @const_fold_trunc(ptr %dst, i64 %d) {
; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[CONST_0]] to i16
-; CHECK-NEXT: store i16 [[TRUNC]], ptr [[DST]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll b/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll
index 33e688c..62399c5 100644
--- a/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll
+++ b/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll
@@ -67,19 +67,7 @@ define void @test(i32 %arg, i32 %L1.limit, i32 %L2.switch, i1 %c, ptr %dst) {
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[L2_HEADER_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[L2_INNER_HEADER:%.*]]
-; CHECK: L2.Inner.header:
-; CHECK-NEXT: [[L2_ACCUM:%.*]] = phi i32 [ [[L2_ACCUM_NEXT:%.*]], [[L2_INNER_HEADER]] ], [ 1, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[L2_IV:%.*]] = phi i64 [ [[L2_IV_NEXT:%.*]], [[L2_INNER_HEADER]] ], [ 1, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[L2_ACCUM_NEXT]] = sub i32 [[L2_ACCUM]], [[L1_EXIT_VAL]]
-; CHECK-NEXT: [[L2_DUMMY_BUT_NEED_IT:%.*]] = sext i32 [[L2_ACCUM_NEXT]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[L2_IV]]
-; CHECK-NEXT: store i64 [[L2_DUMMY_BUT_NEED_IT]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[L2_IV_NEXT]] = add nuw nsw i64 [[L2_IV]], 1
-; CHECK-NEXT: [[L2_EXIT_COND:%.*]] = icmp ugt i64 [[L2_IV]], 11
-; CHECK-NEXT: br i1 [[L2_EXIT_COND]], label [[L2_HEADER_LOOPEXIT]], label [[L2_INNER_HEADER]]
; CHECK: L2.exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
index 0a8e9dc..02e1d0e 100644
--- a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
@@ -94,20 +94,8 @@ define void @pr47390(ptr %a) {
; CHECK-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ]
-; CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[PRIMARY_ADD]] = add i32 [[PRIMARY]], 1
-; CHECK-NEXT: [[SECONDARY_ADD]] = add i32 [[SECONDARY]], 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[SECONDARY]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[GEP]], align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[SECONDARY]], 5
-; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll b/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll
index f61478b..b31b732 100644
--- a/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll
+++ b/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll
@@ -15,15 +15,6 @@ define i32 @foo(ptr %p) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]], !dbg [[DBG3]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ], !dbg [[DBG7:![0-9]+]]
-; CHECK-NEXT: [[CONV:%.*]] = trunc i64 0 to i8, !dbg [[DBG8:![0-9]+]]
-; CHECK-NEXT: store i8 [[CONV]], ptr [[P]], align 1, !dbg [[DBG3]]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG9:![0-9]+]]
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 1, !dbg [[DBG10:![0-9]+]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG11:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret i32 0
;
@@ -64,9 +55,4 @@ exit: ; preds = %loop
; CHECK: [[META4]] = distinct !DISubprogram(name: "foo", scope: [[META1]], file: [[META1]], line: 11, type: [[META5:![0-9]+]], spFlags: DISPFlagDefinition, unit: [[META0]], retainedNodes: [[META6:![0-9]+]])
; CHECK: [[META5]] = distinct !DISubroutineType(types: [[META6]])
; CHECK: [[META6]] = !{}
-; CHECK: [[DBG7]] = !DILocation(line: 4, scope: [[META4]])
-; CHECK: [[DBG8]] = !DILocation(line: 5, scope: [[META4]])
-; CHECK: [[DBG9]] = !DILocation(line: 7, scope: [[META4]])
-; CHECK: [[DBG10]] = !DILocation(line: 8, scope: [[META4]])
-; CHECK: [[DBG11]] = !DILocation(line: 9, scope: [[META4]])
;.
diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll
index d97624f..274bd04 100644
--- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll
+++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll
@@ -24,16 +24,7 @@ define dso_local void @constTC(ptr noalias nocapture %A) optsize {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1800
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
-; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 1800
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
index 4f5a26e9..156c2bd 100644
--- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
+++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
@@ -198,16 +198,7 @@ define dso_local void @cannotProveAlignedTC(ptr noalias nocapture %A, i32 %p, i3
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
-; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], [[N]]
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT_LOOPEXIT]], label [[LOOP]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll
index ff2baec..eca39e6 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll
@@ -108,25 +108,8 @@ define i32 @sink_after_dead_inst(ptr %A.ptr) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15
-; CHECK-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true
-; CHECK-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true
-; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; CHECK-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]]
-; CHECK-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]]
-; CHECK-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32
-; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32
-; CHECK-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]]
-; CHECK-NEXT: store i32 0, ptr [[A_GEP]], align 4
-; CHECK-NEXT: br i1 [[VEC_DEAD]], label %[[FOR_END]], label %[[LOOP]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[FOR_LCSSA]]
+; CHECK-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll
index fd19760..ebfe16b 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll
@@ -22,21 +22,8 @@ define float @for_load_interleave_only(ptr %src) {
; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[SRC]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[FOR:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 16
-; CHECK-NEXT: [[L]] = load float, ptr [[PTR_IV]], align 4
-; CHECK-NEXT: store float 0.000000e+00, ptr [[PTR_IV]], align 4
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[FOR_LCSSA:%.*]] = phi float [ [[FOR]], %[[LOOP]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[FOR_LCSSA]]
+; CHECK-NEXT: ret float [[TMP2]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
index 149157a..7412980 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll
@@ -119,22 +119,7 @@ define void @test_pr54223_sink_after_insertion_order(ptr noalias %a, ptr noalias
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SCALAR_RECUR6:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[FOR_2_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[NEG:%.*]] = fneg float [[SCALAR_RECUR6]]
-; CHECK-NEXT: [[MULADD:%.*]] = call float @llvm.fmuladd.f32(float [[SCALAR_RECUR]], float [[NEG]], float 0.000000e+00)
-; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[FOR_1_NEXT]] = load float, ptr [[A]], align 4
-; CHECK-NEXT: [[FOR_2_NEXT]] = load float, ptr [[B]], align 4
-; CHECK-NEXT: store float [[MULADD]], ptr [[DST_GEP]], align 4
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 10000
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll
new file mode 100644
index 0000000..e97d6e66d
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll
@@ -0,0 +1,244 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF2IC1 %s
+; RUN: opt -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF2IC2 %s
+; RUN: opt -passes=loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF1IC2 %s
+
+define i32 @FOR_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) {
+; VF2IC1-LABEL: define i32 @FOR_used_outside(
+; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF2IC1-NEXT: [[ENTRY:.*]]:
+; VF2IC1-NEXT: br label %[[LOOP:.*]]
+; VF2IC1: [[LOOP]]:
+; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4
+; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]]
+; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4
+; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1
+; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC1: [[FOR_END]]:
+; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ]
+; VF2IC1-NEXT: ret i32 [[TMP32]]
+;
+; VF2IC2-LABEL: define i32 @FOR_used_outside(
+; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF2IC2-NEXT: [[ENTRY:.*]]:
+; VF2IC2-NEXT: br label %[[LOOP:.*]]
+; VF2IC2: [[LOOP]]:
+; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]]
+; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4
+; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]]
+; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]]
+; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4
+; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1
+; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC2: [[FOR_END]]:
+; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ]
+; VF2IC2-NEXT: ret i32 [[TMP66]]
+;
+; VF1IC2-LABEL: define i32 @FOR_used_outside(
+; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF1IC2-NEXT: [[ENTRY:.*]]:
+; VF1IC2-NEXT: br label %[[LOOP:.*]]
+; VF1IC2: [[LOOP]]:
+; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ]
+; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]]
+; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4
+; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]]
+; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]]
+; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4
+; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1
+; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF1IC2: [[FOR_END]]:
+; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ]
+; VF1IC2-NEXT: ret i32 [[TMP30]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %for = phi i32 [ 33, %entry ], [ %for.next, %loop ]
+ %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv
+ %for.next = load i32, ptr %gep.A, align 4
+ %add = add nsw i32 %for, %for.next
+ %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv
+ store i32 %add, ptr %gep.B, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %for.end, label %loop
+
+for.end:
+ ret i32 %for
+}
+
+define i32 @FOR_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) {
+; VF2IC1-LABEL: define i32 @FOR_next_used_outside(
+; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF2IC1-NEXT: [[ENTRY:.*]]:
+; VF2IC1-NEXT: br label %[[LOOP:.*]]
+; VF2IC1: [[LOOP]]:
+; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4
+; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]]
+; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4
+; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1
+; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC1: [[FOR_END]]:
+; VF2IC1-NEXT: [[TMP28:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ]
+; VF2IC1-NEXT: ret i32 [[TMP28]]
+;
+; VF2IC2-LABEL: define i32 @FOR_next_used_outside(
+; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF2IC2-NEXT: [[ENTRY:.*]]:
+; VF2IC2-NEXT: br label %[[LOOP:.*]]
+; VF2IC2: [[LOOP]]:
+; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]]
+; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4
+; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]]
+; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]]
+; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4
+; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1
+; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC2: [[FOR_END]]:
+; VF2IC2-NEXT: [[TMP62:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ]
+; VF2IC2-NEXT: ret i32 [[TMP62]]
+;
+; VF1IC2-LABEL: define i32 @FOR_next_used_outside(
+; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF1IC2-NEXT: [[ENTRY:.*]]:
+; VF1IC2-NEXT: br label %[[LOOP:.*]]
+; VF1IC2: [[LOOP]]:
+; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ]
+; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]]
+; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4
+; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]]
+; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]]
+; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4
+; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1
+; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF1IC2: [[FOR_END]]:
+; VF1IC2-NEXT: [[TMP27:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ]
+; VF1IC2-NEXT: ret i32 [[TMP27]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %for = phi i32 [ 33, %entry ], [ %for.next, %loop ]
+ %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv
+ %for.next = load i32, ptr %gep.A, align 4
+ %add = add nsw i32 %for, %for.next
+ %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv
+ store i32 %add, ptr %gep.B, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %for.end, label %loop
+
+for.end:
+ ret i32 %for.next
+}
+
+define i32 @FOR_and_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) {
+; VF2IC1-LABEL: define i32 @FOR_and_next_used_outside(
+; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF2IC1-NEXT: [[ENTRY:.*]]:
+; VF2IC1-NEXT: br label %[[LOOP:.*]]
+; VF2IC1: [[LOOP]]:
+; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4
+; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]]
+; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4
+; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1
+; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC1: [[FOR_END]]:
+; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[RES:%.*]] = add i32 [[TMP32]], [[TMP33]]
+; VF2IC1-NEXT: ret i32 [[RES]]
+;
+; VF2IC2-LABEL: define i32 @FOR_and_next_used_outside(
+; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF2IC2-NEXT: [[ENTRY:.*]]:
+; VF2IC2-NEXT: br label %[[LOOP:.*]]
+; VF2IC2: [[LOOP]]:
+; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]]
+; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4
+; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]]
+; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]]
+; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4
+; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1
+; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC2: [[FOR_END]]:
+; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[TMP71:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[RES:%.*]] = add i32 [[TMP66]], [[TMP71]]
+; VF2IC2-NEXT: ret i32 [[RES]]
+;
+; VF1IC2-LABEL: define i32 @FOR_and_next_used_outside(
+; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
+; VF1IC2-NEXT: [[ENTRY:.*]]:
+; VF1IC2-NEXT: br label %[[LOOP:.*]]
+; VF1IC2: [[LOOP]]:
+; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ]
+; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]]
+; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4
+; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]]
+; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]]
+; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4
+; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1
+; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF1IC2: [[FOR_END]]:
+; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ]
+; VF1IC2-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ]
+; VF1IC2-NEXT: [[RES:%.*]] = add i32 [[TMP30]], [[TMP33]]
+; VF1IC2-NEXT: ret i32 [[RES]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %for = phi i32 [ 33, %entry ], [ %for.next, %loop ]
+ %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv
+ %for.next = load i32, ptr %gep.A, align 4
+ %add = add nsw i32 %for, %for.next
+ %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv
+ store i32 %add, ptr %gep.B, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %for.end, label %loop
+
+for.end:
+ %res = add i32 %for, %for.next
+ ret i32 %res
+}
+
+
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
index 443e44b..bd0c098 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
@@ -1193,19 +1193,9 @@ define i64 @constant_folded_previous_value() {
; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; UNROLL-NO-IC-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; UNROLL-NO-IC: middle.block:
-; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]]
-; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: br label [[SCALAR_BODY:%.*]]
-; UNROLL-NO-IC: scalar.body:
-; UNROLL-NO-IC-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VAR3]] = add i64 0, 1
-; UNROLL-NO-IC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
-; UNROLL-NO-IC-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000
-; UNROLL-NO-IC-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]]
; UNROLL-NO-IC: for.end:
-; UNROLL-NO-IC-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-IC-NEXT: ret i64 [[VAR2_LCSSA]]
+; UNROLL-NO-IC-NEXT: ret i64 1
;
; UNROLL-NO-VF-LABEL: @constant_folded_previous_value(
; UNROLL-NO-VF-NEXT: entry:
@@ -1218,19 +1208,9 @@ define i64 @constant_folded_previous_value() {
; UNROLL-NO-VF-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; UNROLL-NO-VF-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; UNROLL-NO-VF: middle.block:
-; UNROLL-NO-VF-NEXT: br label [[FOR_END:%.*]]
-; UNROLL-NO-VF: scalar.ph:
; UNROLL-NO-VF-NEXT: br label [[SCALAR_BODY:%.*]]
-; UNROLL-NO-VF: scalar.body:
-; UNROLL-NO-VF-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[VAR3]] = add i64 0, 1
-; UNROLL-NO-VF-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
-; UNROLL-NO-VF-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000
-; UNROLL-NO-VF-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]]
; UNROLL-NO-VF: for.end:
-; UNROLL-NO-VF-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-VF-NEXT: ret i64 [[VAR2_LCSSA]]
+; UNROLL-NO-VF-NEXT: ret i64 1
;
; SINK-AFTER-LABEL: @constant_folded_previous_value(
; SINK-AFTER-NEXT: entry:
@@ -1243,19 +1223,9 @@ define i64 @constant_folded_previous_value() {
; SINK-AFTER-NEXT: [[TMP0:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; SINK-AFTER-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; SINK-AFTER: middle.block:
-; SINK-AFTER-NEXT: br label [[FOR_END:%.*]]
-; SINK-AFTER: scalar.ph:
; SINK-AFTER-NEXT: br label [[SCALAR_BODY:%.*]]
-; SINK-AFTER: scalar.body:
-; SINK-AFTER-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ]
-; SINK-AFTER-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ]
-; SINK-AFTER-NEXT: [[VAR3]] = add i64 0, 1
-; SINK-AFTER-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
-; SINK-AFTER-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000
-; SINK-AFTER-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]]
; SINK-AFTER: for.end:
-; SINK-AFTER-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ]
-; SINK-AFTER-NEXT: ret i64 [[VAR2_LCSSA]]
+; SINK-AFTER-NEXT: ret i64 1
;
entry:
br label %scalar.body
@@ -2725,21 +2695,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; UNROLL-NO-IC: middle.block:
; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP49]], [[TMP48]]
; UNROLL-NO-IC-NEXT: [[TMP51:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
-; UNROLL-NO-IC-NEXT: br label [[BB1:%.*]]
-; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: br label [[BB2:%.*]]
; UNROLL-NO-IC: bb1:
-; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-IC-NEXT: ret i32 [[VAR]]
-; UNROLL-NO-IC: bb2:
-; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ]
-; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
-; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
-; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
-; UNROLL-NO-IC-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2
-; UNROLL-NO-IC-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]]
+; UNROLL-NO-IC-NEXT: ret i32 [[TMP51]]
;
; UNROLL-NO-VF-LABEL: @sink_into_replication_region(
; UNROLL-NO-VF-NEXT: bb:
@@ -2785,21 +2743,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; UNROLL-NO-VF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25:![0-9]+]], !llvm.loop [[LOOP26:![0-9]+]]
; UNROLL-NO-VF: middle.block:
; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP12]]
-; UNROLL-NO-VF-NEXT: br label [[BB1:%.*]]
-; UNROLL-NO-VF: scalar.ph:
; UNROLL-NO-VF-NEXT: br label [[BB2:%.*]]
; UNROLL-NO-VF: bb1:
-; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-VF-NEXT: ret i32 [[VAR]]
-; UNROLL-NO-VF: bb2:
-; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ]
-; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
-; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
-; UNROLL-NO-VF-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
-; UNROLL-NO-VF-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2
-; UNROLL-NO-VF-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]]
+; UNROLL-NO-VF-NEXT: ret i32 [[BIN_RDX]]
;
; SINK-AFTER-LABEL: @sink_into_replication_region(
; SINK-AFTER-NEXT: bb:
@@ -2868,21 +2814,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; SINK-AFTER-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25:![0-9]+]], !llvm.loop [[LOOP26:![0-9]+]]
; SINK-AFTER: middle.block:
; SINK-AFTER-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP25]])
-; SINK-AFTER-NEXT: br label [[BB1:%.*]]
-; SINK-AFTER: scalar.ph:
; SINK-AFTER-NEXT: br label [[BB2:%.*]]
; SINK-AFTER: bb1:
-; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ]
-; SINK-AFTER-NEXT: ret i32 [[VAR]]
-; SINK-AFTER: bb2:
-; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ]
-; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
-; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
-; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
-; SINK-AFTER-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2
-; SINK-AFTER-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]]
+; SINK-AFTER-NEXT: ret i32 [[TMP27]]
;
bb:
br label %bb2
@@ -3078,25 +3012,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; UNROLL-NO-IC: middle.block:
; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP73]], [[TMP72]]
; UNROLL-NO-IC-NEXT: [[TMP75:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
-; UNROLL-NO-IC-NEXT: br label [[BB1:%.*]]
-; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: br label [[BB2:%.*]]
; UNROLL-NO-IC: bb1:
-; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP75]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-IC-NEXT: ret i32 [[VAR]]
-; UNROLL-NO-IC: bb2:
-; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ]
-; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
-; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
-; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
-; UNROLL-NO-IC-NEXT: store i32 [[VAR3]], ptr [[G]], align 4
-; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
-; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1
-; UNROLL-NO-IC-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2
-; UNROLL-NO-IC-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]]
+; UNROLL-NO-IC-NEXT: ret i32 [[TMP75]]
;
; UNROLL-NO-VF-LABEL: @sink_into_replication_region_multiple(
; UNROLL-NO-VF-NEXT: bb:
@@ -3155,25 +3073,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; UNROLL-NO-VF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25]], !llvm.loop [[LOOP28:![0-9]+]]
; UNROLL-NO-VF: middle.block:
; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP17]], [[TMP16]]
-; UNROLL-NO-VF-NEXT: br label [[BB1:%.*]]
-; UNROLL-NO-VF: scalar.ph:
; UNROLL-NO-VF-NEXT: br label [[BB2:%.*]]
; UNROLL-NO-VF: bb1:
-; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-VF-NEXT: ret i32 [[VAR]]
-; UNROLL-NO-VF: bb2:
-; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ]
-; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
-; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
-; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
-; UNROLL-NO-VF-NEXT: store i32 [[VAR3]], ptr [[G]], align 4
-; UNROLL-NO-VF-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
-; UNROLL-NO-VF-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1
-; UNROLL-NO-VF-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2
-; UNROLL-NO-VF-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]]
+; UNROLL-NO-VF-NEXT: ret i32 [[BIN_RDX]]
;
; SINK-AFTER-LABEL: @sink_into_replication_region_multiple(
; SINK-AFTER-NEXT: bb:
@@ -3273,25 +3175,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; SINK-AFTER-NEXT: br i1 [[TMP38]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25]], !llvm.loop [[LOOP28:![0-9]+]]
; SINK-AFTER: middle.block:
; SINK-AFTER-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP37]])
-; SINK-AFTER-NEXT: br label [[BB1:%.*]]
-; SINK-AFTER: scalar.ph:
; SINK-AFTER-NEXT: br label [[BB2:%.*]]
; SINK-AFTER: bb1:
-; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ]
-; SINK-AFTER-NEXT: ret i32 [[VAR]]
-; SINK-AFTER: bb2:
-; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ]
-; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
-; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
-; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
-; SINK-AFTER-NEXT: store i32 [[VAR3]], ptr [[G]], align 4
-; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
-; SINK-AFTER-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1
-; SINK-AFTER-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2
-; SINK-AFTER-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]]
+; SINK-AFTER-NEXT: ret i32 [[TMP39]]
;
bb:
br label %bb2
@@ -3341,26 +3227,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) {
; UNROLL-NO-IC-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; UNROLL-NO-IC: middle.block:
; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP3]], i32 2
-; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]]
-; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]]
-; UNROLL-NO-IC: loop:
-; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15
-; UNROLL-NO-IC-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true
-; UNROLL-NO-IC-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true
-; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; UNROLL-NO-IC-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]]
-; UNROLL-NO-IC-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]]
-; UNROLL-NO-IC-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32
-; UNROLL-NO-IC-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32
-; UNROLL-NO-IC-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]]
-; UNROLL-NO-IC-NEXT: store i32 0, ptr [[A_GEP]], align 4
-; UNROLL-NO-IC-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]]
; UNROLL-NO-IC: for.end:
-; UNROLL-NO-IC-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-IC-NEXT: ret i32 [[FOR_LCSSA]]
+; UNROLL-NO-IC-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]]
;
; UNROLL-NO-VF-LABEL: @sink_after_dead_inst(
; UNROLL-NO-VF-NEXT: entry:
@@ -3382,26 +3251,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) {
; UNROLL-NO-VF-NEXT: [[TMP11:%.*]] = icmp eq i32 [[TMP7]], 16
; UNROLL-NO-VF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; UNROLL-NO-VF: middle.block:
-; UNROLL-NO-VF-NEXT: br label [[FOR_END:%.*]]
-; UNROLL-NO-VF: scalar.ph:
; UNROLL-NO-VF-NEXT: br label [[LOOP:%.*]]
-; UNROLL-NO-VF: loop:
-; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; UNROLL-NO-VF-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ]
-; UNROLL-NO-VF-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15
-; UNROLL-NO-VF-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true
-; UNROLL-NO-VF-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true
-; UNROLL-NO-VF-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; UNROLL-NO-VF-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]]
-; UNROLL-NO-VF-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]]
-; UNROLL-NO-VF-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32
-; UNROLL-NO-VF-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32
-; UNROLL-NO-VF-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]]
-; UNROLL-NO-VF-NEXT: store i32 0, ptr [[A_GEP]], align 4
-; UNROLL-NO-VF-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]]
; UNROLL-NO-VF: for.end:
-; UNROLL-NO-VF-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-VF-NEXT: ret i32 [[FOR_LCSSA]]
+; UNROLL-NO-VF-NEXT: ret i32 [[TMP10]]
;
; SINK-AFTER-LABEL: @sink_after_dead_inst(
; SINK-AFTER-NEXT: entry:
@@ -3423,26 +3275,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) {
; SINK-AFTER-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; SINK-AFTER: middle.block:
; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP3]], i32 2
-; SINK-AFTER-NEXT: br label [[FOR_END:%.*]]
-; SINK-AFTER: scalar.ph:
; SINK-AFTER-NEXT: br label [[LOOP:%.*]]
-; SINK-AFTER: loop:
-; SINK-AFTER-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; SINK-AFTER-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ]
-; SINK-AFTER-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15
-; SINK-AFTER-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true
-; SINK-AFTER-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true
-; SINK-AFTER-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; SINK-AFTER-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]]
-; SINK-AFTER-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]]
-; SINK-AFTER-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32
-; SINK-AFTER-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32
-; SINK-AFTER-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]]
-; SINK-AFTER-NEXT: store i32 0, ptr [[A_GEP]], align 4
-; SINK-AFTER-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]]
; SINK-AFTER: for.end:
-; SINK-AFTER-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ]
-; SINK-AFTER-NEXT: ret i32 [[FOR_LCSSA]]
+; SINK-AFTER-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/flags.ll b/llvm/test/Transforms/LoopVectorize/flags.ll
index cb86f5f..2268085 100644
--- a/llvm/test/Transforms/LoopVectorize/flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/flags.ll
@@ -129,20 +129,8 @@ define float @fast_math(ptr noalias %s) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP1]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[S]], i64 [[IV]]
-; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = fadd fast float [[RED]], [[TMP4]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 256
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[ADD_LCSSA]]
+; CHECK-NEXT: ret float [[TMP3]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/float-induction.ll b/llvm/test/Transforms/LoopVectorize/float-induction.ll
index 901f67e..f56699a 100644
--- a/llvm/test/Transforms/LoopVectorize/float-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/float-induction.ll
@@ -1649,11 +1649,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) {
; VEC4_INTERL1-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; VEC4_INTERL1-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; VEC4_INTERL1: middle.block:
-; VEC4_INTERL1-NEXT: br label [[EXIT:%.*]]
-; VEC4_INTERL1: scalar.ph:
; VEC4_INTERL1-NEXT: br label [[LOOP:%.*]]
-; VEC4_INTERL1: loop:
-; VEC4_INTERL1-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; VEC4_INTERL1: exit:
; VEC4_INTERL1-NEXT: ret i32 0
;
@@ -1672,11 +1668,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) {
; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; VEC4_INTERL2-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; VEC4_INTERL2: middle.block:
-; VEC4_INTERL2-NEXT: br label [[EXIT:%.*]]
-; VEC4_INTERL2: scalar.ph:
; VEC4_INTERL2-NEXT: br label [[LOOP:%.*]]
-; VEC4_INTERL2: loop:
-; VEC4_INTERL2-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; VEC4_INTERL2: exit:
; VEC4_INTERL2-NEXT: ret i32 0
;
@@ -1699,11 +1691,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) {
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; VEC1_INTERL2-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; VEC1_INTERL2: middle.block:
-; VEC1_INTERL2-NEXT: br label [[EXIT:%.*]]
-; VEC1_INTERL2: scalar.ph:
; VEC1_INTERL2-NEXT: br label [[LOOP:%.*]]
-; VEC1_INTERL2: loop:
-; VEC1_INTERL2-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; VEC1_INTERL2: exit:
; VEC1_INTERL2-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll b/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll
index 93031c7..555e695 100644
--- a/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll
+++ b/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll
@@ -66,22 +66,9 @@ define float @minloopattr(ptr nocapture readonly %arg) #0 {
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[TMP4]])
-; CHECK-NEXT: br label [[OUT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[T1:%.*]] = phi i64 [ [[T7:%.*]], [[LOOP]] ], [ 1, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[T2:%.*]] = phi float [ [[T6:%.*]], [[LOOP]] ], [ [[T]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[T3:%.*]] = getelementptr float, ptr [[ARG]], i64 [[T1]]
-; CHECK-NEXT: [[T4:%.*]] = load float, ptr [[T3]], align 4
-; CHECK-NEXT: [[T5:%.*]] = fcmp olt float [[T2]], [[T4]]
-; CHECK-NEXT: [[T6]] = select i1 [[T5]], float [[T2]], float [[T4]]
-; CHECK-NEXT: [[T7]] = add i64 [[T1]], 1
-; CHECK-NEXT: [[T8:%.*]] = icmp eq i64 [[T7]], 65537
-; CHECK-NEXT: br i1 [[T8]], label [[OUT]], label [[LOOP]]
; CHECK: out:
-; CHECK-NEXT: [[T6_LCSSA:%.*]] = phi float [ [[T6]], [[LOOP]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[T6_LCSSA]]
+; CHECK-NEXT: ret float [[TMP6]]
;
top:
%t = load float, ptr %arg
diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll
index 616f156..5b7c27a 100644
--- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll
@@ -113,3 +113,49 @@ loop:
exit:
ret float %max.next
}
+
+define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) {
+; CHECK-LABEL: define float @test_fmax_and_fmin(
+; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]]
+; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4
+; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]])
+; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ]
+; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ]
+; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]]
+; CHECK-NEXT: ret float [[SUB]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ]
+ %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ]
+ %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv
+ %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv
+ %l.0 = load float, ptr %gep.src.0, align 4
+ %l.1 = load float, ptr %gep.src.1, align 4
+ %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0)
+ %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %sub = fsub float %max.next, %min.next
+ ret float %sub
+}
diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll
index 1a2b233..8b6a6e1 100644
--- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll
@@ -683,3 +683,49 @@ loop:
exit:
ret float %max.next
}
+
+define float @test_fmax_and_fmax(ptr %src.0, ptr %src.1, i64 %n) {
+; CHECK-LABEL: define float @test_fmax_and_fmax(
+; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]]
+; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4
+; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]])
+; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ]
+; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ]
+; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]]
+; CHECK-NEXT: ret float [[SUB]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ]
+ %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ]
+ %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv
+ %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv
+ %l.0 = load float, ptr %gep.src.0, align 4
+ %l.1 = load float, ptr %gep.src.1, align 4
+ %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0)
+ %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %n
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %sub = fsub float %max.next, %min.next
+ ret float %sub
+}
diff --git a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
index c86e271..f7376a0 100644
--- a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
@@ -67,23 +67,7 @@ define i32 @test(ptr nocapture %f) #0 {
; UNROLL-NOSIMPLIFY-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; UNROLL-NOSIMPLIFY: middle.block:
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]]
-; UNROLL-NOSIMPLIFY: scalar.ph:
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]]
-; UNROLL-NOSIMPLIFY: for.body:
-; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; UNROLL-NOSIMPLIFY-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]]
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; UNROLL-NOSIMPLIFY-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP11]], 100
-; UNROLL-NOSIMPLIFY-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; UNROLL-NOSIMPLIFY: if.then:
-; UNROLL-NOSIMPLIFY-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], 20
-; UNROLL-NOSIMPLIFY-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]]
-; UNROLL-NOSIMPLIFY: for.inc:
-; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; UNROLL-NOSIMPLIFY-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128
-; UNROLL-NOSIMPLIFY-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]]
+; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]]
; UNROLL-NOSIMPLIFY: for.end:
; UNROLL-NOSIMPLIFY-NEXT: ret i32 0
;
@@ -449,25 +433,7 @@ define void @minimal_bit_widths(i1 %c) {
; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; UNROLL-NOSIMPLIFY: middle.block:
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]]
-; UNROLL-NOSIMPLIFY: scalar.ph:
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]]
-; UNROLL-NOSIMPLIFY: for.body:
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP9:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 1000, [[SCALAR_PH]] ]
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr undef, i64 [[TMP1]]
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
-; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; UNROLL-NOSIMPLIFY: if.then:
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8
-; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP6]], ptr [[TMP3]], align 1
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]]
-; UNROLL-NOSIMPLIFY: for.inc:
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP9]] = add nuw nsw i64 [[TMP1]], 1
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP7]] = add i64 [[TMP2]], -1
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
-; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]]
+; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]]
; UNROLL-NOSIMPLIFY: for.end:
; UNROLL-NOSIMPLIFY-NEXT: ret void
;
@@ -575,26 +541,7 @@ define void @minimal_bit_widths_with_aliasing_store(i1 %c, ptr %ptr) {
; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; UNROLL-NOSIMPLIFY: middle.block:
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]]
-; UNROLL-NOSIMPLIFY: scalar.ph:
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]]
-; UNROLL-NOSIMPLIFY: for.body:
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP9:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 1000, [[SCALAR_PH]] ]
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP1]]
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
-; UNROLL-NOSIMPLIFY-NEXT: store i8 0, ptr [[TMP3]], align 1
-; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
-; UNROLL-NOSIMPLIFY: if.then:
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8
-; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP6]], ptr [[TMP3]], align 1
-; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]]
-; UNROLL-NOSIMPLIFY: for.inc:
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP9]] = add nuw nsw i64 [[TMP1]], 1
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP7]] = add i64 [[TMP2]], -1
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
-; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]]
+; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]]
; UNROLL-NOSIMPLIFY: for.end:
; UNROLL-NOSIMPLIFY-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll b/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll
index f0b32c6..ccf05d7 100644
--- a/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll
@@ -24,17 +24,7 @@ define void @multiple_iv_uses_in_same_instruction(ptr %ptr) {
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [100 x [100 x i32]], ptr [[PTR]], i64 0, i64 [[IV]], i64 [[IV]]
-; CHECK-NEXT: [[T:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-NEXT: store i32 [[T]], ptr [[GEP]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/induction-step.ll b/llvm/test/Transforms/LoopVectorize/induction-step.ll
index 362de0e..53d5ac4 100644
--- a/llvm/test/Transforms/LoopVectorize/induction-step.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction-step.ll
@@ -291,18 +291,6 @@ define void @iv_no_binary_op_in_descriptor(i1 %c, ptr %dst) {
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT_P:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT:%.*]] = add i64 [[IV]], 1
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[IV_NEXT_P]] = phi i64 [ [[IV_NEXT]], %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT_P]], 1000
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 60c844c..cc55a51 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -2764,19 +2764,9 @@ define i32 @i8_loop() nounwind readnone ssp uwtable {
; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[TMP0]])
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[B_0:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[B_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4
-; CHECK-NEXT: [[B_NEXT]] = add i8 [[B_0]], -1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[B_NEXT]], 0
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP2]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[A_0_AND_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP2]]
;
; IND-LABEL: @i8_loop(
; IND-NEXT: entry:
@@ -2789,11 +2779,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable {
; IND-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; IND-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; IND: middle.block:
-; IND-NEXT: br label [[EXIT:%.*]]
-; IND: scalar.ph:
; IND-NEXT: br label [[LOOP:%.*]]
-; IND: loop:
-; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; IND: exit:
; IND-NEXT: ret i32 0
;
@@ -2808,11 +2794,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable {
; UNROLL-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; UNROLL-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; UNROLL: middle.block:
-; UNROLL-NEXT: br label [[EXIT:%.*]]
-; UNROLL: scalar.ph:
; UNROLL-NEXT: br label [[LOOP:%.*]]
-; UNROLL: loop:
-; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; UNROLL: exit:
; UNROLL-NEXT: ret i32 0
;
@@ -2833,19 +2815,9 @@ define i32 @i8_loop() nounwind readnone ssp uwtable {
; UNROLL-NO-IC: middle.block:
; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = and <2 x i32> [[TMP1]], [[TMP0]]
; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[BIN_RDX]])
-; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]]
-; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]]
-; UNROLL-NO-IC: loop:
-; UNROLL-NO-IC-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[B_0:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[B_NEXT:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4
-; UNROLL-NO-IC-NEXT: [[B_NEXT]] = add i8 [[B_0]], -1
-; UNROLL-NO-IC-NEXT: [[EC:%.*]] = icmp eq i8 [[B_NEXT]], 0
-; UNROLL-NO-IC-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; UNROLL-NO-IC: exit:
-; UNROLL-NO-IC-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-IC-NEXT: ret i32 [[A_0_AND_LCSSA]]
+; UNROLL-NO-IC-NEXT: ret i32 [[TMP3]]
;
; INTERLEAVE-LABEL: @i8_loop(
; INTERLEAVE-NEXT: entry:
@@ -2858,11 +2830,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable {
; INTERLEAVE-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; INTERLEAVE-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; INTERLEAVE: middle.block:
-; INTERLEAVE-NEXT: br label [[EXIT:%.*]]
-; INTERLEAVE: scalar.ph:
; INTERLEAVE-NEXT: br label [[LOOP:%.*]]
-; INTERLEAVE: loop:
-; INTERLEAVE-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; INTERLEAVE: exit:
; INTERLEAVE-NEXT: ret i32 0
;
@@ -2897,19 +2865,9 @@ define i32 @i16_loop() nounwind readnone ssp uwtable {
; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[TMP0]])
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[B_0:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[B_0_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4
-; CHECK-NEXT: [[B_0_NEXT]] = add i16 [[B_0]], -1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[B_0_NEXT]], 0
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP2]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[A_0_AND_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP2]]
;
; IND-LABEL: @i16_loop(
; IND-NEXT: entry:
@@ -2922,11 +2880,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable {
; IND-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536
; IND-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IND: middle.block:
-; IND-NEXT: br label [[EXIT:%.*]]
-; IND: scalar.ph:
; IND-NEXT: br label [[LOOP:%.*]]
-; IND: loop:
-; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; IND: exit:
; IND-NEXT: ret i32 0
;
@@ -2941,11 +2895,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable {
; UNROLL-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536
; UNROLL-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; UNROLL: middle.block:
-; UNROLL-NEXT: br label [[EXIT:%.*]]
-; UNROLL: scalar.ph:
; UNROLL-NEXT: br label [[LOOP:%.*]]
-; UNROLL: loop:
-; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; UNROLL: exit:
; UNROLL-NEXT: ret i32 0
;
@@ -2966,19 +2916,9 @@ define i32 @i16_loop() nounwind readnone ssp uwtable {
; UNROLL-NO-IC: middle.block:
; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = and <2 x i32> [[TMP1]], [[TMP0]]
; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[BIN_RDX]])
-; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]]
-; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]]
-; UNROLL-NO-IC: loop:
-; UNROLL-NO-IC-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[B_0:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[B_0_NEXT:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4
-; UNROLL-NO-IC-NEXT: [[B_0_NEXT]] = add i16 [[B_0]], -1
-; UNROLL-NO-IC-NEXT: [[EC:%.*]] = icmp eq i16 [[B_0_NEXT]], 0
-; UNROLL-NO-IC-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; UNROLL-NO-IC: exit:
-; UNROLL-NO-IC-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-IC-NEXT: ret i32 [[A_0_AND_LCSSA]]
+; UNROLL-NO-IC-NEXT: ret i32 [[TMP3]]
;
; INTERLEAVE-LABEL: @i16_loop(
; INTERLEAVE-NEXT: entry:
@@ -2991,11 +2931,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable {
; INTERLEAVE-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536
; INTERLEAVE-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; INTERLEAVE: middle.block:
-; INTERLEAVE-NEXT: br label [[EXIT:%.*]]
-; INTERLEAVE: scalar.ph:
; INTERLEAVE-NEXT: br label [[LOOP:%.*]]
-; INTERLEAVE: loop:
-; INTERLEAVE-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; INTERLEAVE: exit:
; INTERLEAVE-NEXT: ret i32 0
;
@@ -5025,28 +4961,9 @@ define i32 @PR32419(i32 %a, i16 %b) {
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP15]])
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I:%.*]] = phi i32 [ -20, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: [[VAR0:%.*]] = phi i32 [ [[A]], [[SCALAR_PH]] ], [ [[VAR6:%.*]], [[FOR_INC]] ]
-; CHECK-NEXT: [[VAR1:%.*]] = trunc i32 [[I]] to i16
-; CHECK-NEXT: [[VAR2:%.*]] = icmp eq i16 [[VAR1]], 0
-; CHECK-NEXT: br i1 [[VAR2]], label [[FOR_INC]], label [[FOR_COND:%.*]]
-; CHECK: for.cond:
-; CHECK-NEXT: [[VAR3:%.*]] = urem i16 [[B]], [[VAR1]]
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: for.inc:
-; CHECK-NEXT: [[VAR4:%.*]] = phi i16 [ [[VAR3]], [[FOR_COND]] ], [ 0, [[FOR_BODY]] ]
-; CHECK-NEXT: [[VAR5:%.*]] = sext i16 [[VAR4]] to i32
-; CHECK-NEXT: [[VAR6]] = or i32 [[VAR0]], [[VAR5]]
-; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[I_NEXT]], 0
-; CHECK-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: [[VAR7:%.*]] = phi i32 [ [[VAR6]], [[FOR_INC]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[VAR7]]
+; CHECK-NEXT: ret i32 [[TMP17]]
;
; IND-LABEL: @PR32419(
; IND-NEXT: entry:
@@ -5086,15 +5003,7 @@ define i32 @PR32419(i32 %a, i16 %b) {
; IND-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], 20
; IND-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; IND: middle.block:
-; IND-NEXT: br label [[FOR_END:%.*]]
-; IND: scalar.ph:
-; IND-NEXT: br label [[FOR_BODY:%.*]]
-; IND: for.body:
-; IND-NEXT: br i1 poison, label [[FOR_INC:%.*]], label [[FOR_COND:%.*]]
-; IND: for.cond:
-; IND-NEXT: br label [[FOR_INC]]
-; IND: for.inc:
-; IND-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
+; IND-NEXT: br label [[FOR_INC:%.*]]
; IND: for.end:
; IND-NEXT: [[VAR7:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP14]])
; IND-NEXT: ret i32 [[VAR7]]
@@ -5160,15 +5069,7 @@ define i32 @PR32419(i32 %a, i16 %b) {
; UNROLL-NEXT: [[TMP28:%.*]] = icmp eq i32 [[INDEX_NEXT]], 20
; UNROLL-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; UNROLL: middle.block:
-; UNROLL-NEXT: br label [[FOR_END:%.*]]
-; UNROLL: scalar.ph:
-; UNROLL-NEXT: br label [[FOR_BODY:%.*]]
-; UNROLL: for.body:
-; UNROLL-NEXT: br i1 poison, label [[FOR_INC:%.*]], label [[FOR_COND:%.*]]
-; UNROLL: for.cond:
-; UNROLL-NEXT: br label [[FOR_INC]]
-; UNROLL: for.inc:
-; UNROLL-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
+; UNROLL-NEXT: br label [[FOR_INC:%.*]]
; UNROLL: for.end:
; UNROLL-NEXT: [[BIN_RDX:%.*]] = or <2 x i32> [[TMP27]], [[TMP26]]
; UNROLL-NEXT: [[VAR7:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[BIN_RDX]])
@@ -5239,28 +5140,9 @@ define i32 @PR32419(i32 %a, i16 %b) {
; UNROLL-NO-IC: middle.block:
; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = or <2 x i32> [[TMP29]], [[TMP28]]
; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[BIN_RDX]])
-; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]]
-; UNROLL-NO-IC: scalar.ph:
-; UNROLL-NO-IC-NEXT: br label [[FOR_BODY:%.*]]
-; UNROLL-NO-IC: for.body:
-; UNROLL-NO-IC-NEXT: [[I:%.*]] = phi i32 [ -20, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; UNROLL-NO-IC-NEXT: [[VAR0:%.*]] = phi i32 [ [[A]], [[SCALAR_PH]] ], [ [[VAR6:%.*]], [[FOR_INC]] ]
-; UNROLL-NO-IC-NEXT: [[VAR1:%.*]] = trunc i32 [[I]] to i16
-; UNROLL-NO-IC-NEXT: [[VAR2:%.*]] = icmp eq i16 [[VAR1]], 0
-; UNROLL-NO-IC-NEXT: br i1 [[VAR2]], label [[FOR_INC]], label [[FOR_COND:%.*]]
-; UNROLL-NO-IC: for.cond:
-; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = urem i16 [[B]], [[VAR1]]
-; UNROLL-NO-IC-NEXT: br label [[FOR_INC]]
-; UNROLL-NO-IC: for.inc:
-; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i16 [ [[VAR3]], [[FOR_COND]] ], [ 0, [[FOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = sext i16 [[VAR4]] to i32
-; UNROLL-NO-IC-NEXT: [[VAR6]] = or i32 [[VAR0]], [[VAR5]]
-; UNROLL-NO-IC-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
-; UNROLL-NO-IC-NEXT: [[COND:%.*]] = icmp eq i32 [[I_NEXT]], 0
-; UNROLL-NO-IC-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]]
+; UNROLL-NO-IC-NEXT: br label [[FOR_INC:%.*]]
; UNROLL-NO-IC: for.end:
-; UNROLL-NO-IC-NEXT: [[VAR7:%.*]] = phi i32 [ [[VAR6]], [[FOR_INC]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
-; UNROLL-NO-IC-NEXT: ret i32 [[VAR7]]
+; UNROLL-NO-IC-NEXT: ret i32 [[TMP31]]
;
; INTERLEAVE-LABEL: @PR32419(
; INTERLEAVE-NEXT: entry:
@@ -5818,23 +5700,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]]
-; CHECK-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32
-; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr i32, ptr [[DST]], i32 [[IV_TRUNC]]
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[IV_TRUNC]], [[MUL]]
-; CHECK-NEXT: store i32 [[ADD]], ptr [[DST_GEP]], align 4
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[TRUNC_IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -5862,11 +5728,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; IND-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; IND-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
; IND: middle.block:
-; IND-NEXT: br label [[EXIT:%.*]]
-; IND: scalar.ph:
; IND-NEXT: br label [[LOOP:%.*]]
-; IND: loop:
-; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; IND: exit:
; IND-NEXT: ret void
;
@@ -5900,11 +5762,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; UNROLL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; UNROLL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
; UNROLL: middle.block:
-; UNROLL-NEXT: br label [[EXIT:%.*]]
-; UNROLL: scalar.ph:
; UNROLL-NEXT: br label [[LOOP:%.*]]
-; UNROLL: loop:
-; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; UNROLL: exit:
; UNROLL-NEXT: ret void
;
@@ -5937,23 +5795,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; UNROLL-NO-IC-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
; UNROLL-NO-IC: middle.block:
-; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]]
-; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]]
-; UNROLL-NO-IC: loop:
-; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4
-; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]]
-; UNROLL-NO-IC-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1
-; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; UNROLL-NO-IC-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32
-; UNROLL-NO-IC-NEXT: [[DST_GEP:%.*]] = getelementptr i32, ptr [[DST]], i32 [[IV_TRUNC]]
-; UNROLL-NO-IC-NEXT: [[ADD:%.*]] = add i32 [[IV_TRUNC]], [[MUL]]
-; UNROLL-NO-IC-NEXT: store i32 [[ADD]], ptr [[DST_GEP]], align 4
-; UNROLL-NO-IC-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[TRUNC_IV_NEXT]], 100
-; UNROLL-NO-IC-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]]
; UNROLL-NO-IC: exit:
; UNROLL-NO-IC-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll b/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll
index 9222af9..8975c05 100644
--- a/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll
@@ -18,23 +18,9 @@ define i32 @one_direct_branch(ptr %src) {
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP3]], i32 3
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]]
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1
-; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP5]]
;
entry:
br label %loop
@@ -73,26 +59,9 @@ define i32 @two_direct_branch(ptr %src) {
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP3]], i32 3
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]]
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[PHI_XOR_1:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ]
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[PHI_XOR_1]], [[BB]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1
-; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP5]]
;
entry:
br label %loop
@@ -141,26 +110,9 @@ define i32 @cond_branch(i32 %a, ptr %src) {
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]]
-; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[IV]], [[A]]
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_LATCH]], label [[THEN:%.*]]
-; CHECK: then:
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ], [ 10, [[THEN]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1
-; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP7]]
;
entry:
br label %loop
@@ -205,18 +157,9 @@ define i32 @optimizable_trunc_used_outside() {
; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[VEC_IND]], i32 3
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT_I_I:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_I_I]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[IV_TRUNC_LCSSA:%.*]] = phi i32 [ [[IV_TRUNC]], [[LOOP]] ], [ [[TMP1]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[IV_TRUNC_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP1]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll b/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll
index 1128dd3..2c97bb7 100644
--- a/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll
@@ -33,19 +33,6 @@ define void @i65_induction_with_negative_step(ptr %dst) {
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV_I65:%.*]] = phi i65 [ 0, %[[SCALAR_PH]] ], [ [[IV_I65_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TRUNC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[TRUNC]] = trunc i65 [[IV_I65]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TRUNC]]
-; CHECK-NEXT: store i64 [[FOR]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: [[IV_I65_NEXT]] = add i65 [[IV_I65]], -1
-; CHECK-NEXT: br i1 [[ICMP]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll
index 85e7477..eca9c1f 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll
@@ -27,23 +27,6 @@ define void @gep_for_first_member_does_not_dominate_insert_point(ptr %str, ptr n
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV2_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[OR_1:%.*]] = or disjoint i64 [[IV2]], 1
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[STR]], i64 [[OR_1]]
-; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[GEP1]], align 1
-; CHECK-NEXT: [[GEP0:%.*]] = getelementptr i8, ptr [[STR]], i64 [[IV2]]
-; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[GEP0]], align 1
-; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP9]], [[TMP10]]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i8 [[ADD]], ptr [[GEP_DST]], align 1
-; CHECK-NEXT: [[IV2_NEXT]] = add i64 [[IV2]], 2
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll
index 4dc9cfd..bd0fd77 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll
@@ -45,23 +45,6 @@ define void @merge_tbaa_interleave_group(ptr nocapture readonly %p, ptr noalias
; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_VEC4R]], ptr [[P]], i64 [[IV]], i32 0
-; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr [[X]], align 8, !tbaa [[TBAA0]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul double [[TMP19]], 2.000000e+00
-; CHECK-NEXT: [[X4:%.*]] = getelementptr inbounds [20 x %struct.Vec2r], ptr [[CP]], i64 0, i64 [[IV]], i32 0
-; CHECK-NEXT: store double [[MUL]], ptr [[X4]], align 8, !tbaa [[TBAA10:![0-9]+]]
-; CHECK-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_VEC4R]], ptr [[P]], i64 [[IV]], i32 1
-; CHECK-NEXT: [[TMP20:%.*]] = load double, ptr [[Y]], align 8, !tbaa [[TBAA5]]
-; CHECK-NEXT: [[MUL7:%.*]] = fmul double [[TMP20]], 3.000000e+00
-; CHECK-NEXT: [[Y10:%.*]] = getelementptr inbounds [20 x %struct.Vec2r], ptr [[CP]], i64 0, i64 [[IV]], i32 1
-; CHECK-NEXT: store double [[MUL7]], ptr [[Y10]], align 8, !tbaa [[TBAA12:![0-9]+]]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -126,20 +109,20 @@ define void @ir_tbaa_different(ptr %base, ptr %end, ptr %src) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[SRC]], align 4, !alias.scope [[META13:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[SRC]], align 4, !alias.scope [[META10:![0-9]+]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP11]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4, !alias.scope [[META16:![0-9]+]], !noalias [[META13]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4, !alias.scope [[META13:![0-9]+]], !noalias [[META10]]
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x float> [[WIDE_VEC]], <4 x float> poison, <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <4 x float> [[WIDE_VEC]], <4 x float> poison, <2 x i32> <i32 1, i32 3>
; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x float> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = fmul <2 x float> [[STRIDED_VEC3]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-; CHECK-NEXT: store <4 x float> [[INTERLEAVED_VEC]], ptr [[NEXT_GEP]], align 4, !alias.scope [[META16]], !noalias [[META13]]
+; CHECK-NEXT: store <4 x float> [[INTERLEAVED_VEC]], ptr [[NEXT_GEP]], align 4, !alias.scope [[META13]], !noalias [[META10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -152,9 +135,9 @@ define void @ir_tbaa_different(ptr %base, ptr %end, ptr %src) {
; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds nuw i8, ptr [[PTR_IV]], i64 8
; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[PTR_IV]], align 4
; CHECK-NEXT: [[MUL_1:%.*]] = fmul float [[L_1]], [[L_INVAR]]
-; CHECK-NEXT: store float [[MUL_1]], ptr [[PTR_IV]], align 4, !tbaa [[TBAA10]]
+; CHECK-NEXT: store float [[MUL_1]], ptr [[PTR_IV]], align 4, !tbaa [[TBAA16:![0-9]+]]
; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR_IV]], i64 4
-; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_1]], align 4, !tbaa [[TBAA12]]
+; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_1]], align 4, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: [[MUL_2:%.*]] = fmul float [[L_2]], [[L_INVAR]]
; CHECK-NEXT: store float [[MUL_2]], ptr [[GEP_1]], align 4
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
@@ -278,15 +261,15 @@ exit:
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META8:![0-9]+]], [[META9:![0-9]+]]}
; CHECK: [[META8]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[TBAA10]] = !{[[META11:![0-9]+]], [[META2]], i64 0}
-; CHECK: [[META11]] = !{!"Vec2r", [[META2]], i64 0, [[META2]], i64 8}
-; CHECK: [[TBAA12]] = !{[[META11]], [[META2]], i64 8}
+; CHECK: [[META10]] = !{[[META11:![0-9]+]]}
+; CHECK: [[META11]] = distinct !{[[META11]], [[META12:![0-9]+]]}
+; CHECK: [[META12]] = distinct !{[[META12]], !"LVerDomain"}
; CHECK: [[META13]] = !{[[META14:![0-9]+]]}
-; CHECK: [[META14]] = distinct !{[[META14]], [[META15:![0-9]+]]}
-; CHECK: [[META15]] = distinct !{[[META15]], !"LVerDomain"}
-; CHECK: [[META16]] = !{[[META17:![0-9]+]]}
-; CHECK: [[META17]] = distinct !{[[META17]], [[META15]]}
-; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META8]], [[META9]]}
+; CHECK: [[META14]] = distinct !{[[META14]], [[META12]]}
+; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META8]], [[META9]]}
+; CHECK: [[TBAA16]] = !{[[META17:![0-9]+]], [[META2]], i64 0}
+; CHECK: [[META17]] = !{!"Vec2r", [[META2]], i64 0, [[META2]], i64 8}
+; CHECK: [[TBAA18]] = !{[[META17]], [[META2]], i64 8}
; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META8]]}
; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META8]], [[META9]]}
; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META9]], [[META8]]}
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 4885dd2..b4cad11 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -47,11 +47,7 @@ define void @test_array_load2_store2(i32 %C, i32 %D) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -124,11 +120,7 @@ define void @test_struct_array_load3_store3() {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -206,11 +198,7 @@ define i32 @test_struct_load4(ptr nocapture readonly %S) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[SUB8_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: ret i32 [[SUB8_LCSSA]]
@@ -279,13 +267,9 @@ define void @test_struct_store4(ptr noalias nocapture readonly %A, ptr noalias n
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
;
entry:
br label %for.body
@@ -365,13 +349,9 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
;
entry:
br label %for.body
@@ -619,11 +599,7 @@ define void @load_gap_reverse(ptr noalias nocapture %P1, ptr noalias nocapture %
; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_EXIT]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -681,13 +657,9 @@ define void @mixed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias n
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
;
entry:
br label %for.body
@@ -753,13 +725,9 @@ define void @mixed_load3_store3(ptr nocapture %A) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
;
entry:
br label %for.body
@@ -836,17 +804,13 @@ define void @int_float_struct(ptr nocapture readonly %A) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
+; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr @SA, align 4
; CHECK-NEXT: store float [[ADD3_LCSSA]], ptr @SB, align 4
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/is_fpclass.ll b/llvm/test/Transforms/LoopVectorize/is_fpclass.ll
index ab70c14..6c4ee5b7 100644
--- a/llvm/test/Transforms/LoopVectorize/is_fpclass.ll
+++ b/llvm/test/Transforms/LoopVectorize/is_fpclass.ll
@@ -20,19 +20,7 @@ define void @d() {
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I7:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[I3:%.*]] = load float, ptr null, align 4
-; CHECK-NEXT: [[I4:%.*]] = getelementptr float, ptr @d, i64 [[I]]
-; CHECK-NEXT: [[I5:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[I3]], i32 0)
-; CHECK-NEXT: [[I6:%.*]] = select i1 [[I5]], float 0.000000e+00, float 1.000000e+00
-; CHECK-NEXT: store float [[I6]], ptr [[I4]], align 4
-; CHECK-NEXT: [[I7]] = add i64 [[I]], 1
-; CHECK-NEXT: [[I8:%.*]] = icmp eq i64 [[I7]], 128
-; CHECK-NEXT: br i1 [[I8]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
index e662039..70b1ea1 100644
--- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
+++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
@@ -31,21 +31,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) {
; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP6]], 9223372036854775807
; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP6]], i64 331
; IC1VF4-NEXT: br label %[[EXIT:.*]]
-; IC1VF4: [[SCALAR_PH:.*]]:
-; IC1VF4-NEXT: br label %[[LOOP:.*]]
-; IC1VF4: [[LOOP]]:
-; IC1VF4-NEXT: [[IV:%.*]] = phi i64 [ 19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC1VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC1VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; IC1VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8
-; IC1VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3
-; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]]
-; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0
-; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC1VF4: [[EXIT]]:
-; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC1VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]]
+; IC1VF4-NEXT: ret i64 [[RDX_SELECT]]
;
; IC4VF4-LABEL: define i64 @select_decreasing_induction_icmp_const_start(
; IC4VF4-SAME: ptr [[A:%.*]]) {
@@ -101,21 +88,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) {
; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP18]], 9223372036854775807
; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP18]], i64 331
; IC4VF4-NEXT: br label %[[EXIT:.*]]
-; IC4VF4: [[SCALAR_PH:.*]]:
-; IC4VF4-NEXT: br label %[[LOOP:.*]]
-; IC4VF4: [[LOOP]]:
-; IC4VF4-NEXT: [[IV:%.*]] = phi i64 [ 19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; IC4VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8
-; IC4VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3
-; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]]
-; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0
-; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC4VF4: [[EXIT]]:
-; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC4VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]]
+; IC4VF4-NEXT: ret i64 [[RDX_SELECT]]
;
; IC4VF1-LABEL: define i64 @select_decreasing_induction_icmp_const_start(
; IC4VF1-SAME: ptr [[A:%.*]]) {
@@ -159,21 +133,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) {
; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[RDX_MINMAX5]], 9223372036854775807
; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[RDX_MINMAX5]], i64 331
; IC4VF1-NEXT: br label %[[EXIT:.*]]
-; IC4VF1: [[SCALAR_PH:.*]]:
-; IC4VF1-NEXT: br label %[[LOOP:.*]]
-; IC4VF1: [[LOOP]]:
-; IC4VF1-NEXT: [[IV:%.*]] = phi i64 [ 19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF1-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC4VF1-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; IC4VF1-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8
-; IC4VF1-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3
-; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]]
-; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0
-; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC4VF1: [[EXIT]]:
-; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC4VF1-NEXT: ret i64 [[SPEC_SELECT_LCSSA]]
+; IC4VF1-NEXT: ret i64 [[RDX_SELECT]]
;
entry:
br label %loop
@@ -227,21 +188,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) {
; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP7]], 32767
; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP7]], i16 0
; IC1VF4-NEXT: br label %[[EXIT:.*]]
-; IC1VF4: [[SCALAR_PH:.*]]:
-; IC1VF4-NEXT: br label %[[LOOP:.*]]
-; IC1VF4: [[LOOP]]:
-; IC1VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC1VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC1VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
-; IC1VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1
-; IC1VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]]
-; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1
-; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]]
-; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0
-; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC1VF4: [[EXIT]]:
-; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC1VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]]
+; IC1VF4-NEXT: ret i16 [[RDX_SELECT]]
;
; IC4VF4-LABEL: define i16 @select_decreasing_induction_icmp_table_i16(
; IC4VF4-SAME: i16 noundef [[VAL:%.*]]) {
@@ -460,21 +408,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) {
; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP116]], 32767
; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP116]], i16 0
; IC4VF4-NEXT: br label %[[EXIT:.*]]
-; IC4VF4: [[SCALAR_PH:.*]]:
-; IC4VF4-NEXT: br label %[[LOOP:.*]]
-; IC4VF4: [[LOOP]]:
-; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
-; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1
-; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]]
-; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1
-; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]]
-; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0
-; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC4VF4: [[EXIT]]:
-; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC4VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]]
+; IC4VF4-NEXT: ret i16 [[RDX_SELECT]]
;
; IC4VF1-LABEL: define i16 @select_decreasing_induction_icmp_table_i16(
; IC4VF1-SAME: i16 noundef [[VAL:%.*]]) {
@@ -523,21 +458,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) {
; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[RDX_MINMAX5]], 32767
; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[RDX_MINMAX5]], i16 0
; IC4VF1-NEXT: br label %[[EXIT:.*]]
-; IC4VF1: [[SCALAR_PH:.*]]:
-; IC4VF1-NEXT: br label %[[LOOP:.*]]
-; IC4VF1: [[LOOP]]:
-; IC4VF1-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF1-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC4VF1-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
-; IC4VF1-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1
-; IC4VF1-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]]
-; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1
-; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]]
-; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0
-; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC4VF1: [[EXIT]]:
-; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC4VF1-NEXT: ret i16 [[SPEC_SELECT_LCSSA]]
+; IC4VF1-NEXT: ret i16 [[RDX_SELECT]]
;
entry:
br label %loop
@@ -592,21 +514,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) {
; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP7]], 32767
; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP7]], i16 0
; IC1VF4-NEXT: br label %[[EXIT:.*]]
-; IC1VF4: [[SCALAR_PH:.*]]:
-; IC1VF4-NEXT: br label %[[LOOP:.*]]
-; IC1VF4: [[LOOP]]:
-; IC1VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC1VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC1VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
-; IC1VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1
-; IC1VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]]
-; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1
-; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]]
-; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0
-; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC1VF4: [[EXIT]]:
-; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC1VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]]
+; IC1VF4-NEXT: ret i16 [[RDX_SELECT]]
;
; IC4VF4-LABEL: define i16 @select_decreasing_induction_icmp_table_half(
; IC4VF4-SAME: half noundef [[VAL:%.*]]) {
@@ -825,21 +734,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) {
; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP116]], 32767
; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP116]], i16 0
; IC4VF4-NEXT: br label %[[EXIT:.*]]
-; IC4VF4: [[SCALAR_PH:.*]]:
-; IC4VF4-NEXT: br label %[[LOOP:.*]]
-; IC4VF4: [[LOOP]]:
-; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
-; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1
-; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]]
-; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1
-; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]]
-; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0
-; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC4VF4: [[EXIT]]:
-; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC4VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]]
+; IC4VF4-NEXT: ret i16 [[RDX_SELECT]]
;
; IC4VF1-LABEL: define i16 @select_decreasing_induction_icmp_table_half(
; IC4VF1-SAME: half noundef [[VAL:%.*]]) {
@@ -888,21 +784,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) {
; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[RDX_MINMAX5]], 32767
; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[RDX_MINMAX5]], i16 0
; IC4VF1-NEXT: br label %[[EXIT:.*]]
-; IC4VF1: [[SCALAR_PH:.*]]:
-; IC4VF1-NEXT: br label %[[LOOP:.*]]
-; IC4VF1: [[LOOP]]:
-; IC4VF1-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF1-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC4VF1-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
-; IC4VF1-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1
-; IC4VF1-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]]
-; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1
-; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]]
-; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0
-; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC4VF1: [[EXIT]]:
-; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC4VF1-NEXT: ret i16 [[SPEC_SELECT_LCSSA]]
+; IC4VF1-NEXT: ret i16 [[RDX_SELECT]]
;
entry:
br label %loop
@@ -954,21 +837,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) {
; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP6]], -1
; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP6]], i64 331
; IC1VF4-NEXT: br label %[[EXIT:.*]]
-; IC1VF4: [[SCALAR_PH:.*]]:
-; IC1VF4-NEXT: br label %[[LOOP:.*]]
-; IC1VF4: [[LOOP]]:
-; IC1VF4-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC1VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC1VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; IC1VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8
-; IC1VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3
-; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]]
-; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0
-; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC1VF4: [[EXIT]]:
-; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC1VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]]
+; IC1VF4-NEXT: ret i64 [[RDX_SELECT]]
;
; IC4VF4-LABEL: define i64 @select_decreasing_induction_icmp_iv_unsigned(
; IC4VF4-SAME: ptr [[A:%.*]]) {
@@ -1024,21 +894,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) {
; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP18]], -1
; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP18]], i64 331
; IC4VF4-NEXT: br label %[[EXIT:.*]]
-; IC4VF4: [[SCALAR_PH:.*]]:
-; IC4VF4-NEXT: br label %[[LOOP:.*]]
-; IC4VF4: [[LOOP]]:
-; IC4VF4-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; IC4VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8
-; IC4VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3
-; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]]
-; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0
-; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC4VF4: [[EXIT]]:
-; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC4VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]]
+; IC4VF4-NEXT: ret i64 [[RDX_SELECT]]
;
; IC4VF1-LABEL: define i64 @select_decreasing_induction_icmp_iv_unsigned(
; IC4VF1-SAME: ptr [[A:%.*]]) {
@@ -1082,21 +939,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) {
; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[RDX_MINMAX5]], -1
; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[RDX_MINMAX5]], i64 331
; IC4VF1-NEXT: br label %[[EXIT:.*]]
-; IC4VF1: [[SCALAR_PH:.*]]:
-; IC4VF1-NEXT: br label %[[LOOP:.*]]
-; IC4VF1: [[LOOP]]:
-; IC4VF1-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF1-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
-; IC4VF1-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; IC4VF1-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8
-; IC4VF1-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3
-; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]]
-; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0
-; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]]
; IC4VF1: [[EXIT]]:
-; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; IC4VF1-NEXT: ret i64 [[SPEC_SELECT_LCSSA]]
+; IC4VF1-NEXT: ret i64 [[RDX_SELECT]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll
index 0ace547..b991d58 100644
--- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll
+++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll
@@ -261,22 +261,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) {
; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP6]], -2147483648
; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP6]], i32 331
; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF4IC1: [[SCALAR_PH:.*]]:
-; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF4IC1: [[FOR_BODY]]:
-; CHECK-VF4IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-VF4IC1-NEXT: [[TMP7:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP7]], 3
-; CHECK-VF4IC1-NEXT: [[TMP8:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP8]], i32 [[RDX]]
-; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1
-; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000
-; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF4IC1: [[EXIT]]:
-; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]]
;
; CHECK-VF4IC4-LABEL: define i32 @select_icmp_const_truncated_iv_const_exit(
; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) {
@@ -322,22 +308,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) {
; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP15]], -2147483648
; CHECK-VF4IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP15]], i32 331
; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF4IC4: [[SCALAR_PH:.*]]:
-; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF4IC4: [[FOR_BODY]]:
-; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-VF4IC4-NEXT: [[TMP16:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP16]], 3
-; CHECK-VF4IC4-NEXT: [[TMP17:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP17]], i32 [[RDX]]
-; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1
-; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000
-; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF4IC4: [[EXIT]]:
-; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]]
;
; CHECK-VF1IC4-LABEL: define i32 @select_icmp_const_truncated_iv_const_exit(
; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) {
@@ -384,22 +356,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) {
; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX5]], -2147483648
; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX5]], i32 331
; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF1IC4: [[SCALAR_PH:.*]]:
-; CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF1IC4: [[FOR_BODY]]:
-; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-VF1IC4-NEXT: [[TMP26:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP26]], 3
-; CHECK-VF1IC4-NEXT: [[TMP27:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP27]], i32 [[RDX]]
-; CHECK-VF1IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1
-; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000
-; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF1IC4: [[EXIT]]:
-; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
@@ -446,22 +404,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) {
; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP6]], -2147483648
; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP6]], i32 -1
; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF4IC1: [[SCALAR_PH:.*]]:
-; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF4IC1: [[FOR_BODY]]:
-; CHECK-VF4IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-VF4IC1-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP7]], 0.000000e+00
-; CHECK-VF4IC1-NEXT: [[TMP8:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP8]], i32 [[RDX]]
-; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1
-; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648
-; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF4IC1: [[EXIT]]:
-; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]]
;
; CHECK-VF4IC4-LABEL: define i32 @select_fcmp_max_valid_const_ub(
; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) {
@@ -507,22 +451,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) {
; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP15]], -2147483648
; CHECK-VF4IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP15]], i32 -1
; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF4IC4: [[SCALAR_PH:.*]]:
-; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF4IC4: [[FOR_BODY]]:
-; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-VF4IC4-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP16]], 0.000000e+00
-; CHECK-VF4IC4-NEXT: [[TMP17:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP17]], i32 [[RDX]]
-; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1
-; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648
-; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF4IC4: [[EXIT]]:
-; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]]
;
; CHECK-VF1IC4-LABEL: define i32 @select_fcmp_max_valid_const_ub(
; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) {
@@ -569,22 +499,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) {
; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX5]], -2147483648
; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX5]], i32 -1
; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF1IC4: [[SCALAR_PH:.*]]:
-; CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF1IC4: [[FOR_BODY]]:
-; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
-; CHECK-VF1IC4-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP26]], 0.000000e+00
-; CHECK-VF1IC4-NEXT: [[TMP27:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP27]], i32 [[RDX]]
-; CHECK-VF1IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1
-; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648
-; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF1IC4: [[EXIT]]:
-; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
@@ -636,22 +552,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) {
; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP5]], 0
; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP5]], i32 331
; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF4IC1: [[SCALAR_PH:.*]]:
-; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF4IC1: [[FOR_BODY]]:
-; CHECK-VF4IC1-NEXT: [[IV1:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
-; CHECK-VF4IC1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
-; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 3
-; CHECK-VF4IC1-NEXT: [[CONV:%.*]] = trunc i64 [[IV1]] to i32
-; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]]
-; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV1]], 1
-; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294
-; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF4IC1: [[EXIT]]:
-; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]]
;
; CHECK-VF4IC4-LABEL: define i32 @select_icmp_truncated_unsigned_iv_range(
; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) {
@@ -698,22 +600,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) {
; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP14]], 0
; CHECK-VF4IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP14]], i32 331
; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF4IC4: [[SCALAR_PH:.*]]:
-; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF4IC4: [[FOR_BODY]]:
-; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-VF4IC4-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP15]], 3
-; CHECK-VF4IC4-NEXT: [[CONV:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]]
-; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1
-; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294
-; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF4IC4: [[EXIT]]:
-; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]]
;
; CHECK-VF1IC4-LABEL: define i32 @select_icmp_truncated_unsigned_iv_range(
; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) {
@@ -762,22 +650,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) {
; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX6]], 0
; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX6]], i32 331
; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]]
-; CHECK-VF1IC4: [[SCALAR_PH:.*]]:
-; CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-VF1IC4: [[FOR_BODY]]:
-; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ]
-; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-VF1IC4-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP24]], 3
-; CHECK-VF1IC4-NEXT: [[CONV:%.*]] = trunc i64 [[IV]] to i32
-; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]]
-; CHECK-VF1IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1
-; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294
-; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK-VF1IC4: [[EXIT]]:
-; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
-; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]]
+; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll b/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll
index 3f91baa..86515eb 100644
--- a/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll
+++ b/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll
@@ -102,16 +102,8 @@ define i32 @constpre() {
; CHECK-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INC_PHI:%.*]] = phi i32 [ 32, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[INC]] = sub nsw i32 [[INC_PHI]], 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC]], 0
-; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[INC_PHI_LCSSA:%.*]] = phi i32 [ [[INC_PHI]], %[[FOR_BODY]] ], [ 2, %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[INC_PHI_LCSSA]]
+; CHECK-NEXT: ret i32 2
;
entry:
br label %for.body
@@ -142,18 +134,8 @@ define ptr @geppre(ptr %ptr) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -16
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INC_PHI:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[PTR_PHI:%.*]] = phi ptr [ [[PTR]], %[[SCALAR_PH]] ], [ [[INC_PTR:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[INC]] = add nsw i32 [[INC_PHI]], 1
-; CHECK-NEXT: [[INC_PTR]] = getelementptr i32, ptr [[PTR_PHI]], i32 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC]], 32
-; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[PTR_PHI_LCSSA:%.*]] = phi ptr [ [[PTR_PHI]], %[[FOR_BODY]] ], [ [[IND_ESCAPE]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret ptr [[PTR_PHI_LCSSA]]
+; CHECK-NEXT: ret ptr [[IND_ESCAPE]]
;
entry:
br label %for.body
@@ -411,18 +393,8 @@ define i64 @iv_scalar_steps_and_outside_users(ptr %ptr) {
; VEC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; VEC: [[MIDDLE_BLOCK]]:
; VEC-NEXT: br label %[[EXIT:.*]]
-; VEC: [[SCALAR_PH:.*]]:
-; VEC-NEXT: br label %[[LOOP:.*]]
-; VEC: [[LOOP]]:
-; VEC-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VEC-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
-; VEC-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]]
-; VEC-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4
-; VEC-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000
-; VEC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; VEC: [[EXIT]]:
-; VEC-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 1001, %[[MIDDLE_BLOCK]] ]
-; VEC-NEXT: ret i64 [[IV_LCSSA]]
+; VEC-NEXT: ret i64 1001
;
; INTERLEAVE-LABEL: define i64 @iv_scalar_steps_and_outside_users(
; INTERLEAVE-SAME: ptr [[PTR:%.*]]) {
@@ -442,18 +414,8 @@ define i64 @iv_scalar_steps_and_outside_users(ptr %ptr) {
; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
-; INTERLEAVE-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]]
-; INTERLEAVE-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4
-; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000
-; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; INTERLEAVE: [[EXIT]]:
-; INTERLEAVE-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 1001, %[[MIDDLE_BLOCK]] ]
-; INTERLEAVE-NEXT: ret i64 [[IV_LCSSA]]
+; INTERLEAVE-NEXT: ret i64 1001
;
entry:
br label %loop
@@ -491,20 +453,8 @@ define i32 @iv_2_dead_in_loop_only_used_outside(ptr %ptr) {
; VEC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; VEC: [[MIDDLE_BLOCK]]:
; VEC-NEXT: br label %[[EXIT:.*]]
-; VEC: [[SCALAR_PH:.*]]:
-; VEC-NEXT: br label %[[LOOP:.*]]
-; VEC: [[LOOP]]:
-; VEC-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VEC-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
-; VEC-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
-; VEC-NEXT: [[IV_2_NEXT]] = add nuw i32 [[IV_2]], 2
-; VEC-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]]
-; VEC-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4
-; VEC-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000
-; VEC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; VEC: [[EXIT]]:
-; VEC-NEXT: [[IV_2_LCSSA:%.*]] = phi i32 [ [[IV_2]], %[[LOOP]] ], [ 2002, %[[MIDDLE_BLOCK]] ]
-; VEC-NEXT: ret i32 [[IV_2_LCSSA]]
+; VEC-NEXT: ret i32 2002
;
; INTERLEAVE-LABEL: define i32 @iv_2_dead_in_loop_only_used_outside(
; INTERLEAVE-SAME: ptr [[PTR:%.*]]) {
@@ -524,20 +474,8 @@ define i32 @iv_2_dead_in_loop_only_used_outside(ptr %ptr) {
; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
-; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add nuw i32 [[IV_2]], 2
-; INTERLEAVE-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]]
-; INTERLEAVE-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4
-; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000
-; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; INTERLEAVE: [[EXIT]]:
-; INTERLEAVE-NEXT: [[IV_2_LCSSA:%.*]] = phi i32 [ [[IV_2]], %[[LOOP]] ], [ 2002, %[[MIDDLE_BLOCK]] ]
-; INTERLEAVE-NEXT: ret i32 [[IV_2_LCSSA]]
+; INTERLEAVE-NEXT: ret i32 2002
;
entry:
br label %loop
@@ -1092,18 +1030,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification(ptr %dst) {
; VEC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; VEC: [[MIDDLE_BLOCK]]:
; VEC-NEXT: br label %[[E_EXIT:.*]]
-; VEC: [[SCALAR_PH:.*]]:
-; VEC-NEXT: br label %[[LOOP:.*]]
-; VEC: [[LOOP]]:
-; VEC-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VEC-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]]
-; VEC-NEXT: store i16 0, ptr [[GEP_DST]], align 2
-; VEC-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[IV]]
-; VEC-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8
-; VEC-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]]
; VEC: [[E_EXIT]]:
-; VEC-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
-; VEC-NEXT: ret i32 [[RES]]
+; VEC-NEXT: ret i32 [[TMP5]]
;
; INTERLEAVE-LABEL: define i32 @test_iv_uniform_with_outside_use_scev_simplification(
; INTERLEAVE-SAME: ptr [[DST:%.*]]) {
@@ -1126,18 +1054,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification(ptr %dst) {
; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[E_EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]]
-; INTERLEAVE-NEXT: store i16 0, ptr [[GEP_DST]], align 2
-; INTERLEAVE-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[IV]]
-; INTERLEAVE-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8
-; INTERLEAVE-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]]
; INTERLEAVE: [[E_EXIT]]:
-; INTERLEAVE-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
-; INTERLEAVE-NEXT: ret i32 [[RES]]
+; INTERLEAVE-NEXT: ret i32 [[TMP5]]
;
entry:
%step.1 = sext i8 0 to i32
@@ -1187,19 +1105,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification_2(ptr %dst) {
; VEC: [[MIDDLE_BLOCK]]:
; VEC-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[TMP5]], i32 1
; VEC-NEXT: br label %[[E_EXIT:.*]]
-; VEC: [[SCALAR_PH:.*]]:
-; VEC-NEXT: br label %[[LOOP:.*]]
-; VEC: [[LOOP]]:
-; VEC-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VEC-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]]
-; VEC-NEXT: store i16 0, ptr [[GEP_DST]], align 2
-; VEC-NEXT: [[INC:%.*]] = add i32 [[IV]], 1
-; VEC-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[INC]]
-; VEC-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8
-; VEC-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]]
; VEC: [[E_EXIT]]:
-; VEC-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
-; VEC-NEXT: ret i32 [[RES]]
+; VEC-NEXT: ret i32 [[TMP7]]
;
; INTERLEAVE-LABEL: define i32 @test_iv_uniform_with_outside_use_scev_simplification_2(
; INTERLEAVE-SAME: ptr [[DST:%.*]]) {
@@ -1224,19 +1131,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification_2(ptr %dst) {
; INTERLEAVE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[E_EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]]
-; INTERLEAVE-NEXT: store i16 0, ptr [[GEP_DST]], align 2
-; INTERLEAVE-NEXT: [[INC:%.*]] = add i32 [[IV]], 1
-; INTERLEAVE-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[INC]]
-; INTERLEAVE-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8
-; INTERLEAVE-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]]
; INTERLEAVE: [[E_EXIT]]:
-; INTERLEAVE-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
-; INTERLEAVE-NEXT: ret i32 [[RES]]
+; INTERLEAVE-NEXT: ret i32 [[TMP5]]
;
entry:
%step.1 = sext i8 0 to i32
@@ -1356,24 +1252,12 @@ define i64 @test_iv_increment_incremented(ptr %dst) {
; VEC-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[TMP1]], i32 -1
; VEC-NEXT: store <2 x i16> splat (i16 1), ptr [[TMP2]], align 2
; VEC-NEXT: [[TMP5:%.*]] = add i64 1, -1
-; VEC-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 1
+; VEC-NEXT: [[IV_1_NEXT_LCSSA1:%.*]] = add i64 [[TMP5]], 1
; VEC-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VEC: [[MIDDLE_BLOCK]]:
; VEC-NEXT: br label %[[EXIT:.*]]
-; VEC: [[SCALAR_PH:.*]]:
-; VEC-NEXT: br label %[[LOOP:.*]]
-; VEC: [[LOOP]]:
-; VEC-NEXT: [[IV_1:%.*]] = phi i64 [ 3, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; VEC-NEXT: [[IV_2:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
-; VEC-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV_1]]
-; VEC-NEXT: store i16 1, ptr [[GEP]], align 2
-; VEC-NEXT: [[IV_2_NEXT]] = add i64 [[IV_2]], -1
-; VEC-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_2_NEXT]], 0
-; VEC-NEXT: [[IV_1_NEXT]] = add i64 [[IV_2_NEXT]], 1
-; VEC-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; VEC: [[EXIT]]:
-; VEC-NEXT: [[IV_1_NEXT_LCSSA:%.*]] = phi i64 [ [[IV_1_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; VEC-NEXT: ret i64 [[IV_1_NEXT_LCSSA]]
+; VEC-NEXT: ret i64 [[IV_1_NEXT_LCSSA1]]
;
; INTERLEAVE-LABEL: define i64 @test_iv_increment_incremented(
; INTERLEAVE-SAME: ptr [[DST:%.*]]) {
@@ -1387,24 +1271,12 @@ define i64 @test_iv_increment_incremented(ptr %dst) {
; INTERLEAVE-NEXT: store i16 1, ptr [[TMP0]], align 2
; INTERLEAVE-NEXT: store i16 1, ptr [[TMP1]], align 2
; INTERLEAVE-NEXT: [[TMP2:%.*]] = add i64 1, -1
-; INTERLEAVE-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1
+; INTERLEAVE-NEXT: [[IV_1_NEXT_LCSSA1:%.*]] = add i64 [[TMP2]], 1
; INTERLEAVE-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV_1:%.*]] = phi i64 [ 3, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV_1]]
-; INTERLEAVE-NEXT: store i16 1, ptr [[GEP]], align 2
-; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add i64 [[IV_2]], -1
-; INTERLEAVE-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_2_NEXT]], 0
-; INTERLEAVE-NEXT: [[IV_1_NEXT]] = add i64 [[IV_2_NEXT]], 1
-; INTERLEAVE-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; INTERLEAVE: [[EXIT]]:
-; INTERLEAVE-NEXT: [[IV_1_NEXT_LCSSA:%.*]] = phi i64 [ [[IV_1_NEXT]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ]
-; INTERLEAVE-NEXT: ret i64 [[IV_1_NEXT_LCSSA]]
+; INTERLEAVE-NEXT: ret i64 [[IV_1_NEXT_LCSSA1]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll b/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll
index 11d48df..9358fd9 100644
--- a/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll
+++ b/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll
@@ -48,29 +48,9 @@ define i16 @test_access_size_not_multiple_of_align(i64 %len, ptr %test_base) {
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP17:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP15]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0
-; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i16, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i16 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i16 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i16 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i16 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i16 [[TMP17]]
;
entry:
%alloca = alloca [163840 x i16], align 4
@@ -142,29 +122,9 @@ define i32 @test_access_size_multiple_of_align_but_offset_by_1(i64 %len, ptr %te
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP15]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0
-; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[START]], i64 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP17]]
;
entry:
%alloca = alloca [163840 x i32], align 4
@@ -370,26 +330,7 @@ define void @test_rev_loops_deref_loads(ptr nocapture noundef writeonly %dest) {
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1023, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[IV]]
-; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP19]], 3
-; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[IV]]
-; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP20]], 2
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[IV]]
-; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: for.inc:
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0
-; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: exit:
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false)
; CHECK-NEXT: ret void
@@ -481,27 +422,7 @@ define void @test_rev_loops_non_deref_loads(ptr nocapture noundef writeonly %des
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1023, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: [[OFF:%.*]] = add i64 [[IV]], -1
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[OFF]]
-; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP22]], 3
-; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[OFF]]
-; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP23]], 2
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[OFF]]
-; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: for.inc:
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0
-; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: exit:
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false)
; CHECK-NEXT: ret void
@@ -574,30 +495,9 @@ define i16 @test_strided_access(i64 %len, ptr %test_base) {
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP15:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP13]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]]
-; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0
-; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[IV_STRIDE:%.*]] = mul i64 [[IV]], 2
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV_STRIDE]]
-; CHECK-NEXT: [[VAL:%.*]] = load i16, ptr [[ADDR]], align 2
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i16 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i16 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i16 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i16 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i16 [[TMP15]]
;
entry:
%alloca = alloca [163840 x i16], align 4
@@ -681,27 +581,7 @@ define void @test_rev_loops_strided_deref_loads(ptr nocapture noundef writeonly
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 511, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[IV]]
-; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP21]], 3
-; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[IV_STRIDED:%.*]] = mul i64 [[IV]], 2
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[IV_STRIDED]]
-; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP22]], 2
-; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[IV]]
-; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: for.inc:
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0
-; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]]
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: exit:
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false)
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll b/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll
index b224534..b14a1cd 100644
--- a/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll
+++ b/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll
@@ -52,28 +52,9 @@ define i8 @test_negative_off(i16 %len, ptr %test_base) {
; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP20:%.*]] = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> [[TMP18]])
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ -1000, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i16 [[IV]]
-; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1
-; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]]
-; CHECK: pred:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i8, ptr [[ALLOCA]], i16 [[IV]]
-; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[ADDR]], align 1
-; CHECK-NEXT: br label [[LATCH]]
-; CHECK: latch:
-; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i8 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ]
-; CHECK-NEXT: [[ACCUM_NEXT]] = add i8 [[ACCUM]], [[VAL_PHI]]
-; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i16 [[IV]], -990
-; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]]
+; CHECK-NEXT: br label [[LATCH:%.*]]
; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i8 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i8 [[ACCUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i8 [[TMP20]]
;
entry:
%alloca = alloca [64638 x i8]
diff --git a/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll
index f44fc4e..096a0a8 100644
--- a/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll
@@ -30,28 +30,6 @@ define void @accesses_to_struct_dereferenceable(ptr noalias %dst) {
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[GEP_DST]], align 4
-; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i32 [[D]], 0
-; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 0, i64 [[IV]]
-; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: if.else:
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 1, i64 [[IV]]
-; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[TMP_0:%.*]] = phi i32 [ [[L_A]], [[IF_THEN]] ], [ [[L_B]], [[IF_ELSE]] ]
-; CHECK-NEXT: store i32 [[TMP_0]], ptr [[GEP_DST]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 32000
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP_HEADER]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -265,29 +243,6 @@ define void @accesses_to_struct_may_not_be_dereferenceable_access_size(ptr noali
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[GEP_DST]], align 4
-; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i32 [[D]], 0
-; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 0, i64 [[IV]]
-; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: if.else:
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 1, i64 [[IV]]
-; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 4
-; CHECK-NEXT: [[T:%.*]] = trunc i64 [[L_B]] to i32
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[TMP_0:%.*]] = phi i32 [ [[L_A]], [[IF_THEN]] ], [ [[T]], [[IF_ELSE]] ]
-; CHECK-NEXT: store i32 [[TMP_0]], ptr [[GEP_DST]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 32000
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP_HEADER]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/loop-form.ll b/llvm/test/Transforms/LoopVectorize/loop-form.ll
index c589c77..aed1e29 100644
--- a/llvm/test/Transforms/LoopVectorize/loop-form.ll
+++ b/llvm/test/Transforms/LoopVectorize/loop-form.ll
@@ -79,17 +79,7 @@ define void @bottom_tested(ptr %p, i32 %n) {
; TAILFOLD-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; TAILFOLD-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; TAILFOLD: middle.block:
-; TAILFOLD-NEXT: br label [[IF_END:%.*]]
-; TAILFOLD: scalar.ph:
; TAILFOLD-NEXT: br label [[FOR_COND:%.*]]
-; TAILFOLD: for.cond:
-; TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_COND]] ]
-; TAILFOLD-NEXT: [[IPROM:%.*]] = sext i32 [[I]] to i64
-; TAILFOLD-NEXT: [[B:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IPROM]]
-; TAILFOLD-NEXT: store i16 0, ptr [[B]], align 4
-; TAILFOLD-NEXT: [[INC]] = add nsw i32 [[I]], 1
-; TAILFOLD-NEXT: [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
-; TAILFOLD-NEXT: br i1 [[CMP]], label [[FOR_COND]], label [[IF_END]]
; TAILFOLD: if.end:
; TAILFOLD-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll b/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll
index 781980d..1fe802f 100644
--- a/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll
+++ b/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll
@@ -32,17 +32,6 @@ define void @scalar_loop_dead(ptr noundef captures(none) %a, float noundef %x) {
; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[LOAD:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[X]], [[LOAD]]
-; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -83,7 +72,7 @@ define void @scalar_loop_live(ptr noundef captures(none) %a, float noundef %x, i
; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[TMP0]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -98,7 +87,7 @@ define void @scalar_loop_live(ptr noundef captures(none) %a, float noundef %x, i
; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -128,9 +117,6 @@ exit:
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized"}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.count", i32 8}
; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META5:![0-9]+]], [[META6:![0-9]+]]}
-; CHECK: [[META5]] = !{!"llvm.loop.vectorize.enable", i1 true}
-; CHECK: [[META6]] = !{!"llvm.loop.vectorize.followup_all", [[META1]], [[META2]]}
-; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
index bb51992..30ee480 100644
--- a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
@@ -69,19 +69,7 @@ define void @maxvf3() {
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[J]]
-; CHECK-NEXT: store i8 69, ptr [[AJ]], align 8
-; CHECK-NEXT: [[JP3:%.*]] = add nuw nsw i32 3, [[J]]
-; CHECK-NEXT: [[AJP3:%.*]] = getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[JP3]]
-; CHECK-NEXT: store i8 7, ptr [[AJP3]], align 8
-; CHECK-NEXT: [[J_NEXT]] = add nuw nsw i32 [[J]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[J_NEXT]], 15
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/metadata.ll b/llvm/test/Transforms/LoopVectorize/metadata.ll
index e2dadff..3c59a27 100644
--- a/llvm/test/Transforms/LoopVectorize/metadata.ll
+++ b/llvm/test/Transforms/LoopVectorize/metadata.ll
@@ -142,18 +142,6 @@ define void @widen_call_range(ptr noalias %a, ptr readonly %b) {
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4, !tbaa [[CHAR_TBAA0]], !range [[RNG9:![0-9]+]]
-; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]], !range [[RNG9]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -180,18 +168,6 @@ define void @widen_call_range(ptr noalias %a, ptr readonly %b) {
; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
-; INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4, !tbaa [[CHAR_TBAA0]], !range [[RNG9:![0-9]+]]
-; INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]], !range [[RNG9]]
-; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4
-; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; INTERLEAVE: [[EXIT]]:
; INTERLEAVE-NEXT: ret void
;
@@ -229,21 +205,9 @@ define void @widen_call_fpmath(ptr noalias %a, ptr readonly %b) {
; CHECK-NEXT: store <2 x double> [[TMP1]], ptr [[TMP3]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]]
-; CHECK-NEXT: [[CALL:%.*]] = call double @bar(double [[LOAD]]) #[[ATTR2:[0-9]+]], !fpmath [[META3]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -267,21 +231,9 @@ define void @widen_call_fpmath(ptr noalias %a, ptr readonly %b) {
; INTERLEAVE-NEXT: store <2 x double> [[TMP4]], ptr [[TMP7]], align 8
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]]
-; INTERLEAVE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]]
-; INTERLEAVE-NEXT: [[CALL:%.*]] = call double @bar(double [[LOAD]]) #[[ATTR2:[0-9]+]], !fpmath [[META3]]
-; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
-; INTERLEAVE-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8
-; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; INTERLEAVE: [[EXIT]]:
; INTERLEAVE-NEXT: ret void
;
@@ -319,21 +271,9 @@ define void @widen_intrinsic(ptr noalias %a, ptr readonly %b) {
; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4
-; CHECK-NEXT: [[CALL:%.*]] = call i64 @llvm.abs.i64(i64 [[LOAD]], i1 true), !range [[RNG9]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -357,21 +297,9 @@ define void @widen_intrinsic(ptr noalias %a, ptr readonly %b) {
; INTERLEAVE-NEXT: store <2 x i64> [[TMP4]], ptr [[TMP7]], align 4
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
-; INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4
-; INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @llvm.abs.i64(i64 [[LOAD]], i1 true), !range [[RNG9]]
-; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4
-; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; INTERLEAVE: [[EXIT]]:
; INTERLEAVE-NEXT: ret void
;
@@ -409,21 +337,9 @@ define void @widen_intrinsic_fpmath(ptr noalias %a, ptr readonly %b) {
; CHECK-NEXT: store <2 x double> [[TMP1]], ptr [[TMP3]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]]
-; CHECK-NEXT: [[CALL:%.*]] = call double @llvm.sin.f64(double [[LOAD]]) #[[ATTR2]], !fpmath [[META3]]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -447,21 +363,9 @@ define void @widen_intrinsic_fpmath(ptr noalias %a, ptr readonly %b) {
; INTERLEAVE-NEXT: store <2 x double> [[TMP4]], ptr [[TMP7]], align 8
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: br label %[[EXIT:.*]]
-; INTERLEAVE: [[SCALAR_PH:.*]]:
-; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
-; INTERLEAVE: [[LOOP]]:
-; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]]
-; INTERLEAVE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]]
-; INTERLEAVE-NEXT: [[CALL:%.*]] = call double @llvm.sin.f64(double [[LOAD]]) #[[ATTR2]], !fpmath [[META3]]
-; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
-; INTERLEAVE-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8
-; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; INTERLEAVE: [[EXIT]]:
; INTERLEAVE-NEXT: ret void
;
@@ -506,7 +410,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) {
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <2 x i32> [[TMP3]], splat (i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -522,7 +426,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) {
; CHECK-NEXT: store ptr [[ARRAYIDX_2]], ptr [[ARRAYIDX_1]], align 8, !custom_md [[META2]]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !custom_md [[META2]]
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[SIZE]], !custom_md [[META2]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]], !custom_md [[META2]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP13:![0-9]+]], !custom_md [[META2]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -555,7 +459,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) {
; INTERLEAVE-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2)
; INTERLEAVE-NEXT: [[VEC_IND_NEXT2]] = add <2 x i32> [[STEP_ADD3]], splat (i32 2)
; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; INTERLEAVE: [[MIDDLE_BLOCK]]:
; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SIZE]], [[N_VEC]]
; INTERLEAVE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -571,7 +475,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) {
; INTERLEAVE-NEXT: store ptr [[ARRAYIDX_2]], ptr [[ARRAYIDX_1]], align 8, !custom_md [[META2]]
; INTERLEAVE-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !custom_md [[META2]]
; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[SIZE]], !custom_md [[META2]]
-; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]], !custom_md [[META2]]
+; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP13:![0-9]+]], !custom_md [[META2]]
; INTERLEAVE: [[EXIT]]:
; INTERLEAVE-NEXT: ret void
;
@@ -617,12 +521,11 @@ attributes #1 = { nounwind "vector-function-abi-variant"="_ZGV_LLVM_N2v_bar(bar_
; CHECK: [[META6]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META6]], [[META5]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META5]], [[META6]]}
-; CHECK: [[RNG9]] = !{i64 0, i64 2}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META5]], [[META6]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META5]], [[META6]]}
; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META5]], [[META6]]}
; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META5]], [[META6]]}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META5]], [[META6]]}
-; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META5]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META6]], [[META5]]}
;.
; INTERLEAVE: [[CHAR_TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0, i64 0}
; INTERLEAVE: [[META1]] = !{!"omnipotent char", [[META2]]}
@@ -633,10 +536,9 @@ attributes #1 = { nounwind "vector-function-abi-variant"="_ZGV_LLVM_N2v_bar(bar_
; INTERLEAVE: [[META6]] = !{!"llvm.loop.unroll.runtime.disable"}
; INTERLEAVE: [[LOOP7]] = distinct !{[[LOOP7]], [[META6]], [[META5]]}
; INTERLEAVE: [[LOOP8]] = distinct !{[[LOOP8]], [[META5]], [[META6]]}
-; INTERLEAVE: [[RNG9]] = !{i64 0, i64 2}
+; INTERLEAVE: [[LOOP9]] = distinct !{[[LOOP9]], [[META5]], [[META6]]}
; INTERLEAVE: [[LOOP10]] = distinct !{[[LOOP10]], [[META5]], [[META6]]}
; INTERLEAVE: [[LOOP11]] = distinct !{[[LOOP11]], [[META5]], [[META6]]}
; INTERLEAVE: [[LOOP12]] = distinct !{[[LOOP12]], [[META5]], [[META6]]}
-; INTERLEAVE: [[LOOP13]] = distinct !{[[LOOP13]], [[META5]], [[META6]]}
-; INTERLEAVE: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META5]]}
+; INTERLEAVE: [[LOOP13]] = distinct !{[[LOOP13]], [[META6]], [[META5]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll b/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll
index 7866728..47a2a84 100644
--- a/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll
@@ -26,20 +26,8 @@ define float @maximumnum_intrinsic(ptr readonly %x) {
; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> [[RDX_MINMAX]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]]
-; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4
-; CHECK-NEXT: [[RED_NEXT]] = tail call float @llvm.maximumnum.f32(float [[RED]], float [[L]])
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]]
+; CHECK-NEXT: ret float [[TMP6]]
;
entry:
br label %loop
@@ -82,20 +70,8 @@ define float @maximumnum_intrinsic_fast(ptr readonly %x) {
; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call fast <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmax.v2f32(<2 x float> [[RDX_MINMAX]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]]
-; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4
-; CHECK-NEXT: [[RED_NEXT]] = tail call fast float @llvm.maximumnum.f32(float [[RED]], float [[L]])
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]]
+; CHECK-NEXT: ret float [[TMP6]]
;
entry:
br label %loop
@@ -138,20 +114,8 @@ define float @minimumnum_intrinsic(ptr readonly %x) {
; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> [[RDX_MINMAX]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]]
-; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4
-; CHECK-NEXT: [[RED_NEXT]] = tail call float @llvm.minimumnum.f32(float [[RED]], float [[L]])
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]]
+; CHECK-NEXT: ret float [[TMP6]]
;
entry:
br label %loop
@@ -194,20 +158,8 @@ define float @minimumnum_intrinsic_fast(ptr readonly %x) {
; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call fast <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmin.v2f32(<2 x float> [[RDX_MINMAX]])
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]]
-; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4
-; CHECK-NEXT: [[RED_NEXT]] = tail call fast float @llvm.minimumnum.f32(float [[RED]], float [[L]])
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]]
+; CHECK-NEXT: ret float [[TMP6]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
index 2e88ff6..a1fc1b8 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
@@ -34,10 +34,6 @@ define i32 @main() #0 {
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
index d928a4b..b19f9c5 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
@@ -12,14 +12,7 @@ define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
; CHECK: [[EXIT:.*:]]
;
entry:
@@ -55,14 +48,7 @@ define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
; CHECK: [[EXIT:.*:]]
;
entry:
@@ -91,9 +77,9 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly
; CHECK-LABEL: define void @predicated_sincos(
; CHECK-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
; CHECK: [[ENTRY:.*:]]
-; CHECK: [[VECTOR_BODY1:.*]]:
-; CHECK: [[VECTOR_BODY:.*:]]
-; CHECK: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_BODY1]] ], [ [[INDEX_NEXT:%.*]], %[[IF_THEN2:.*]] ]
+; CHECK: [[VECTOR_BODY:.*]]:
+; CHECK: [[VECTOR_BODY1:.*:]]
+; CHECK: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_BODY]] ], [ [[INDEX_NEXT:%.*]], %[[IF_THEN1:.*]] ]
; CHECK: [[TMP4:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]])
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 0
; CHECK: [[TMP6:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 1
@@ -107,23 +93,14 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly
; CHECK: br label %[[PRED_STORE_CONTINUE]]
; CHECK: [[PRED_STORE_CONTINUE]]:
; CHECK: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1
-; CHECK: br i1 [[TMP12]], label %[[PRED_STORE_IF1:.*]], label %[[IF_THEN2]]
+; CHECK: br i1 [[TMP12]], label %[[PRED_STORE_IF1:.*]], label %[[IF_THEN1]]
; CHECK: [[PRED_STORE_IF1]]:
; CHECK: [[TMP15:%.*]] = extractelement <2 x float> [[TMP5]], i32 1
; CHECK: store float [[TMP15]], ptr [[TMP14:%.*]], align 4
; CHECK: [[TMP17:%.*]] = extractelement <2 x float> [[TMP6]], i32 1
; CHECK: store float [[TMP17]], ptr [[TMP16:%.*]], align 4
-; CHECK: br label %[[IF_THEN2]]
-; CHECK: [[IF_THEN2]]:
-; CHECK: [[IF_THEN:.*:]]
-; CHECK: [[IF_THEN3:.*:]]
-; CHECK: [[IF_THEN4:.*:]]
-; CHECK: [[IF_THEN1:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
+; CHECK: br label %[[IF_THEN1]]
+; CHECK: [[IF_THEN1]]:
; CHECK: [[IF_MERGE:.*:]]
; CHECK: [[FOR_END:.*:]]
;
@@ -167,14 +144,7 @@ define void @modf_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
; CHECK: [[EXIT:.*:]]
;
entry:
@@ -210,14 +180,7 @@ define void @modf_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
; CHECK: [[EXIT:.*:]]
;
entry:
@@ -253,14 +216,7 @@ define void @sincospi_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1
; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4
; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
-; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4
-; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4
; CHECK: [[EXIT:.*:]]
;
entry:
@@ -296,14 +252,7 @@ define void @sincospi_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa
; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1
; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8
; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8
-; CHECK: [[MIDDLE_BLOCK:.*:]]
-; CHECK: [[SCALAR_PH:.*:]]
; CHECK: [[FOR_BODY:.*:]]
-; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincospi.f64(double [[IN_VAL:%.*]])
-; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0
-; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1
-; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8
-; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8
; CHECK: [[EXIT:.*:]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll b/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll
index 9b6774e..481fa04 100644
--- a/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll
+++ b/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll
@@ -26,20 +26,6 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b)
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP1:%.*]] = fcmp ogt float [[TMP7]], 1.000000e+02
-; CHECK-NEXT: tail call void @llvm.experimental.noalias.scope.decl(metadata [[META0]])
-; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP7]], 1.000000e+00
-; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX5]], align 4
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 1599
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/optsize.ll b/llvm/test/Transforms/LoopVectorize/optsize.ll
index 819cfaa..9f82795 100644
--- a/llvm/test/Transforms/LoopVectorize/optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/optsize.ll
@@ -273,19 +273,8 @@ define void @pr43371() optsize {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP28:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY29:.*]]
; CHECK: [[FOR_COND_CLEANUP28]]:
; CHECK-NEXT: unreachable
-; CHECK: [[FOR_BODY29]]:
-; CHECK-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ]
-; CHECK-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]]
-; CHECK-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32
-; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]]
-; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1
-; CHECK-NEXT: [[INC37]] = add i16 [[I24_0170]], 1
-; CHECK-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756
-; CHECK-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]]
;
; PGSO-LABEL: define void @pr43371(
; PGSO-SAME: ) #[[ATTR0]] {
@@ -310,19 +299,8 @@ define void @pr43371() optsize {
; PGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; PGSO: [[MIDDLE_BLOCK]]:
; PGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]]
-; PGSO: [[SCALAR_PH:.*]]:
-; PGSO-NEXT: br label %[[FOR_BODY29:.*]]
; PGSO: [[FOR_COND_CLEANUP28]]:
; PGSO-NEXT: unreachable
-; PGSO: [[FOR_BODY29]]:
-; PGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ]
-; PGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]]
-; PGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32
-; PGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]]
-; PGSO-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1
-; PGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1
-; PGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756
-; PGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]]
;
; NPGSO-LABEL: define void @pr43371(
; NPGSO-SAME: ) #[[ATTR0]] {
@@ -347,19 +325,8 @@ define void @pr43371() optsize {
; NPGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; NPGSO: [[MIDDLE_BLOCK]]:
; NPGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]]
-; NPGSO: [[SCALAR_PH:.*]]:
-; NPGSO-NEXT: br label %[[FOR_BODY29:.*]]
; NPGSO: [[FOR_COND_CLEANUP28]]:
; NPGSO-NEXT: unreachable
-; NPGSO: [[FOR_BODY29]]:
-; NPGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ]
-; NPGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]]
-; NPGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32
-; NPGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]]
-; NPGSO-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1
-; NPGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1
-; NPGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756
-; NPGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]]
;
; We do not want to generate SCEV predicates when optimising for size, because
; that will lead to extra code generation such as the SCEV overflow runtime
@@ -407,19 +374,8 @@ define void @pr43371_pgso() !prof !14 {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP28:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY29:.*]]
; CHECK: [[FOR_COND_CLEANUP28]]:
; CHECK-NEXT: unreachable
-; CHECK: [[FOR_BODY29]]:
-; CHECK-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ]
-; CHECK-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]]
-; CHECK-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32
-; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]]
-; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1
-; CHECK-NEXT: [[INC37]] = add i16 [[I24_0170]], 1
-; CHECK-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756
-; CHECK-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]]
;
; PGSO-LABEL: define void @pr43371_pgso(
; PGSO-SAME: ) !prof [[PROF14]] {
@@ -444,19 +400,8 @@ define void @pr43371_pgso() !prof !14 {
; PGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; PGSO: [[MIDDLE_BLOCK]]:
; PGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]]
-; PGSO: [[SCALAR_PH:.*]]:
-; PGSO-NEXT: br label %[[FOR_BODY29:.*]]
; PGSO: [[FOR_COND_CLEANUP28]]:
; PGSO-NEXT: unreachable
-; PGSO: [[FOR_BODY29]]:
-; PGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ]
-; PGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]]
-; PGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32
-; PGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]]
-; PGSO-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1
-; PGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1
-; PGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756
-; PGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]]
;
; NPGSO-LABEL: define void @pr43371_pgso(
; NPGSO-SAME: ) !prof [[PROF14]] {
@@ -686,16 +631,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
-; CHECK-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
-; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret void
;
@@ -734,16 +669,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; PGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; PGSO: [[MIDDLE_BLOCK]]:
; PGSO-NEXT: br label %[[FOR_END:.*]]
-; PGSO: [[SCALAR_PH:.*]]:
-; PGSO-NEXT: br label %[[FOR_BODY:.*]]
-; PGSO: [[FOR_BODY]]:
-; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; PGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
-; PGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
-; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4
-; PGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; PGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025
-; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; PGSO: [[FOR_END]]:
; PGSO-NEXT: ret void
;
@@ -782,16 +707,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; NPGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; NPGSO: [[MIDDLE_BLOCK]]:
; NPGSO-NEXT: br label %[[FOR_END:.*]]
-; NPGSO: [[SCALAR_PH:.*]]:
-; NPGSO-NEXT: br label %[[FOR_BODY:.*]]
-; NPGSO: [[FOR_BODY]]:
-; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; NPGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
-; NPGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
-; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4
-; NPGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; NPGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025
-; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; NPGSO: [[FOR_END]]:
; NPGSO-NEXT: ret void
;
@@ -830,7 +745,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 {
; CHECK-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -843,7 +758,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 {
; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret void
;
@@ -862,7 +777,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 {
; PGSO-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4
; PGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2
; PGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
-; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; PGSO: [[MIDDLE_BLOCK]]:
; PGSO-NEXT: br label %[[SCALAR_PH]]
; PGSO: [[SCALAR_PH]]:
@@ -875,7 +790,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 {
; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4
; PGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; PGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025
-; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; PGSO: [[FOR_END]]:
; PGSO-NEXT: ret void
;
@@ -894,7 +809,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 {
; NPGSO-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4
; NPGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2
; NPGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
-; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; NPGSO: [[MIDDLE_BLOCK]]:
; NPGSO-NEXT: br label %[[SCALAR_PH]]
; NPGSO: [[SCALAR_PH]]:
@@ -907,7 +822,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 {
; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4
; NPGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; NPGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025
-; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; NPGSO: [[FOR_END]]:
; NPGSO-NEXT: ret void
;
@@ -1092,10 +1007,8 @@ exit:
; CHECK: [[META17]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META16]], [[META17]]}
; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META16]], [[META17]]}
-; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META21:![0-9]+]]}
-; CHECK: [[META21]] = !{!"llvm.loop.vectorize.enable", i1 true}
-; CHECK: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]}
-; CHECK: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]]}
+; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]}
+; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]}
;.
; PGSO: [[PROF14]] = !{!"function_entry_count", i64 0}
; PGSO: [[LOOP15]] = distinct !{[[LOOP15]], [[META16:![0-9]+]], [[META17:![0-9]+]]}
@@ -1103,10 +1016,8 @@ exit:
; PGSO: [[META17]] = !{!"llvm.loop.unroll.runtime.disable"}
; PGSO: [[LOOP18]] = distinct !{[[LOOP18]], [[META16]], [[META17]]}
; PGSO: [[LOOP19]] = distinct !{[[LOOP19]], [[META16]], [[META17]]}
-; PGSO: [[LOOP20]] = distinct !{[[LOOP20]], [[META21:![0-9]+]]}
-; PGSO: [[META21]] = !{!"llvm.loop.vectorize.enable", i1 true}
-; PGSO: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]}
-; PGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]]}
+; PGSO: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]}
+; PGSO: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]}
;.
; NPGSO: [[PROF14]] = !{!"function_entry_count", i64 0}
; NPGSO: [[LOOP15]] = distinct !{[[LOOP15]], [[META16:![0-9]+]], [[META17:![0-9]+]]}
@@ -1119,8 +1030,6 @@ exit:
; NPGSO: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]}
; NPGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META17]], [[META16]]}
; NPGSO: [[LOOP24]] = distinct !{[[LOOP24]], [[META16]], [[META17]]}
-; NPGSO: [[LOOP25]] = distinct !{[[LOOP25]], [[META26:![0-9]+]]}
-; NPGSO: [[META26]] = !{!"llvm.loop.vectorize.enable", i1 true}
-; NPGSO: [[LOOP27]] = distinct !{[[LOOP27]], [[META16]], [[META17]]}
-; NPGSO: [[LOOP28]] = distinct !{[[LOOP28]], [[META16]]}
+; NPGSO: [[LOOP25]] = distinct !{[[LOOP25]], [[META16]], [[META17]]}
+; NPGSO: [[LOOP26]] = distinct !{[[LOOP26]], [[META16]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/phi-cost.ll b/llvm/test/Transforms/LoopVectorize/phi-cost.ll
index bf5631c..7b5d0b6 100644
--- a/llvm/test/Transforms/LoopVectorize/phi-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/phi-cost.ll
@@ -185,13 +185,9 @@ define i32 @red_phi_0(i32 %start, ptr %src) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
-; CHECK-NEXT: br i1 [[TMP1]], label %[[SCALAR_PH:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: br i1 [[TMP1]], label %[[SCALAR_PH1:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[SCALAR_PH1]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH1:.*:]]
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: br i1 poison, label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> <i32 poison, i32 0>, i32 [[START]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP0]])
diff --git a/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll b/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll
index a2563256..f2d6834 100644
--- a/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll
@@ -29,22 +29,6 @@ define void @pr154045(ptr %p, i1 %c, i64 %x) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
-; CHECK-NEXT: br i1 [[C]], label %[[LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: [[REM:%.*]] = srem i64 0, [[X]]
-; CHECK-NEXT: br label %[[LATCH]]
-; CHECK: [[LATCH]]:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[REM]], %[[ELSE]] ], [ 0, %[[LOOP]] ]
-; CHECK-NEXT: [[PHI_TRUNC:%.*]] = trunc i64 [[PHI]] to i32
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[PHI_TRUNC]], 0
-; CHECK-NEXT: store i32 [[SHL]], ptr [[P]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 1
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/pr32859.ll b/llvm/test/Transforms/LoopVectorize/pr32859.ll
index a29a6bd..2d30e0c 100644
--- a/llvm/test/Transforms/LoopVectorize/pr32859.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr32859.ll
@@ -10,7 +10,7 @@
; CHECK: %e.0.ph = phi i32 [ 0, %if.end.2.i ], [ 0, %middle.block ]
; Function Attrs: nounwind uwtable
-define void @main() #0 {
+define void @main(i32 %n) #0 {
entry:
br label %for.cond1.preheader.i
@@ -21,7 +21,7 @@ for.cond1.preheader.i: ; preds = %if.end.2.i, %entry
if.end.2.i: ; preds = %for.cond1.preheader.i
%inc5.i = add nsw i32 %c.06.i, 1
- %cmp.i = icmp slt i32 %inc5.i, 16
+ %cmp.i = icmp slt i32 %inc5.i, %n
br i1 %cmp.i, label %for.cond1.preheader.i, label %for.cond.preheader
for.cond.preheader: ; preds = %if.end.2.i
diff --git a/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll b/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll
index b0e2ae6..98963a7 100644
--- a/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll
@@ -20,18 +20,8 @@ define i16 @duplicate_lcssa(i16 %val) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI1:%.*]] = extractelement <4 x i16> [[TMP0]], i32 2
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RES:%.*]] = phi i16 [ [[VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV_NEXT]] = sub nsw i16 [[IV]], 1
-; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i16 [[IV_NEXT]], 0
-; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[EXIT]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[LCSSA_1:%.*]] = phi i16 [ [[RES]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI1]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[LCSSA_2:%.*]] = phi i16 [ [[RES]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI1]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i16 [[LCSSA_2]]
+; CHECK-NEXT: ret i16 [[VECTOR_RECUR_EXTRACT_FOR_PHI1]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll b/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll
index d1b912d..a1cb361 100644
--- a/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll
@@ -43,26 +43,7 @@ define i16 @test_true_and_false_branch_equal() {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 12
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_07:%.*]] = phi i16 [ 99, [[SCALAR_PH:%.*]] ], [ [[INC7:%.*]], [[FOR_LATCH:%.*]] ]
-; CHECK-NEXT: [[LV:%.*]] = load i16, ptr @v_38, align 1
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i16 [[LV]], 32767
-; CHECK-NEXT: br i1 [[CMP1]], label [[COND_END:%.*]], label [[COND_END]]
-; CHECK: cond.end:
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i16 [[LV]], 0
-; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_LATCH]], label [[COND_FALSE4:%.*]]
-; CHECK: cond.false4:
-; CHECK-NEXT: [[REM:%.*]] = srem i16 5786, [[LV]]
-; CHECK-NEXT: br label [[FOR_LATCH]]
-; CHECK: for.latch:
-; CHECK-NEXT: [[COND6:%.*]] = phi i16 [ [[REM]], [[COND_FALSE4]] ], [ 5786, [[COND_END]] ]
-; CHECK-NEXT: store i16 [[COND6]], ptr @v_39, align 1
-; CHECK-NEXT: [[INC7]] = add nsw i16 [[I_07]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[INC7]], 111
-; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT]]
+; CHECK-NEXT: br label [[FOR_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: [[RV:%.*]] = load i16, ptr @v_39, align 1
; CHECK-NEXT: ret i16 [[RV]]
diff --git a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
index 8450db6..9ed35fb 100644
--- a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
@@ -57,16 +57,7 @@ define void @pr45679(ptr %A) {
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
-; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -118,16 +109,7 @@ define void @pr45679(ptr %A) {
; VF2UF2-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
; VF2UF2-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VF2UF2: middle.block:
-; VF2UF2-NEXT: br label [[EXIT:%.*]]
-; VF2UF2: scalar.ph:
; VF2UF2-NEXT: br label [[LOOP:%.*]]
-; VF2UF2: loop:
-; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
-; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
-; VF2UF2-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
-; VF2UF2-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
-; VF2UF2-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14
-; VF2UF2-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]]
; VF2UF2: exit:
; VF2UF2-NEXT: ret void
;
@@ -174,16 +156,7 @@ define void @pr45679(ptr %A) {
; VF1UF4-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
; VF1UF4-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VF1UF4: middle.block:
-; VF1UF4-NEXT: br label [[EXIT:%.*]]
-; VF1UF4: scalar.ph:
; VF1UF4-NEXT: br label [[LOOP:%.*]]
-; VF1UF4: loop:
-; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
-; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
-; VF1UF4-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
-; VF1UF4-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
-; VF1UF4-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14
-; VF1UF4-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]]
; VF1UF4: exit:
; VF1UF4-NEXT: ret void
;
@@ -253,17 +226,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 14
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -319,17 +282,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; VF2UF2-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; VF2UF2-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VF2UF2: middle.block:
-; VF2UF2-NEXT: br label [[FOR_END:%.*]]
-; VF2UF2: scalar.ph:
; VF2UF2-NEXT: br label [[FOR_BODY:%.*]]
-; VF2UF2: for.body:
-; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; VF2UF2-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; VF2UF2-NEXT: store i64 [[V]], ptr [[B]], align 8
-; VF2UF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; VF2UF2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 14
-; VF2UF2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; VF2UF2: for.end:
; VF2UF2-NEXT: ret void
;
@@ -380,17 +333,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; VF1UF4-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; VF1UF4-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VF1UF4: middle.block:
-; VF1UF4-NEXT: br label [[FOR_END:%.*]]
-; VF1UF4: scalar.ph:
; VF1UF4-NEXT: br label [[FOR_BODY:%.*]]
-; VF1UF4: for.body:
-; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; VF1UF4-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; VF1UF4-NEXT: store i64 [[V]], ptr [[B]], align 8
-; VF1UF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; VF1UF4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 14
-; VF1UF4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; VF1UF4: for.end:
; VF1UF4-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
index 673d582..01c6c3f 100644
--- a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
@@ -31,23 +31,13 @@ define void @test(i16 %x, i64 %y, ptr %ptr) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[LOOP_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4
-; CHECK-NEXT: [[V2:%.*]] = trunc i64 [[IV]] to i8
-; CHECK-NEXT: [[V3:%.*]] = add i8 [[V2]], 1
-; CHECK-NEXT: [[CMP15:%.*]] = icmp slt i8 [[V3]], 5
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[INC]]
-; CHECK-NEXT: br i1 [[CMP15]], label [[LOOP]], label [[LOOP_EXIT]]
; CHECK: loop.exit:
; CHECK-NEXT: [[DIV_1:%.*]] = udiv i64 [[Y]], [[ADD]]
; CHECK-NEXT: [[V1:%.*]] = add i64 [[DIV_1]], 1
; CHECK-NEXT: br label [[LOOP_2:%.*]]
; CHECK: loop.2:
-; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[IV_NEXT_1:%.*]], [[LOOP_2]] ], [ 0, [[LOOP_EXIT]] ]
+; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[IV_NEXT_1:%.*]], [[LOOP_2]] ], [ 0, [[LOOP]] ]
; CHECK-NEXT: [[IV_NEXT_1]] = add i64 [[IV_1]], [[V1]]
; CHECK-NEXT: call void @use(i64 [[IV_NEXT_1]])
; CHECK-NEXT: [[EC:%.*]] = icmp ult i64 [[IV_NEXT_1]], 200
diff --git a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
index 75437fe..615ea06 100644
--- a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
@@ -61,24 +61,9 @@ define dso_local i16 @reverse_interleave_load_fold_mask() optsize {
; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP28:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP26]])
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 41, [[SCALAR_PH:%.*]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IVMINUS1]] = add nsw i16 [[IV]], -1
-; CHECK-NEXT: [[GEPA0:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 0
-; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[GEPA0]], align 1
-; CHECK-NEXT: [[GEPA3:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 3
-; CHECK-NEXT: [[TMP30:%.*]] = load i16, ptr [[GEPA3]], align 1
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i16 [[TMP29]], [[TMP30]]
-; CHECK-NEXT: [[PREVSUM]] = add nsw i16 [[SUM]], [[ADD]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i16 [[IV]], 1
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[PREVSUM_LCSSA:%.*]] = phi i16 [ [[PREVSUM]], [[LOOP]] ], [ [[TMP28]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i16 [[PREVSUM_LCSSA]]
+; CHECK-NEXT: ret i16 [[TMP28]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll b/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll
index 637b4ab..7b35009 100644
--- a/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll
@@ -33,31 +33,9 @@ define i32 @test(i32 %a, i1 %c.1, i1 %c.2 ) #0 {
; CHECK: middle.block:
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[PREDPHI7]])
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i32> [[PREDPHI5]], i32 1
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 6, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[V_2:%.*]] = phi i32 [ 35902, [[SCALAR_PH]] ], [ [[P_2:%.*]], [[LOOP_LATCH]] ]
-; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[BODY_1:%.*]]
-; CHECK: body.1:
-; CHECK-NEXT: [[V_2_ADD:%.*]] = add i32 [[V_2]], 10
-; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[BODY_2:%.*]]
-; CHECK: body.2:
-; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V_2_ADD]], 20
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], 1
-; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[ADD_1]], [[XOR]]
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[P_1:%.*]] = phi i32 [ [[IV]], [[LOOP_HEADER]] ], [ 9, [[BODY_1]] ], [ 9, [[BODY_2]] ]
-; CHECK-NEXT: [[P_2]] = phi i32 [ [[V_2]], [[LOOP_HEADER]] ], [ [[V_2_ADD]], [[BODY_1]] ], [ [[ADD_2]], [[BODY_2]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp ult i32 [[IV]], 181
-; CHECK-NEXT: br i1 [[EC]], label [[LOOP_HEADER]], label [[EXIT]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[E_1:%.*]] = phi i32 [ [[P_1]], [[LOOP_LATCH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[E_2:%.*]] = phi i32 [ [[P_2]], [[LOOP_LATCH]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[RES:%.*]] = add i32 [[E_1]], [[E_2]]
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[TMP9]], [[TMP10]]
; CHECK-NEXT: ret i32 [[RES]]
;
bb:
diff --git a/llvm/test/Transforms/LoopVectorize/pr66616.ll b/llvm/test/Transforms/LoopVectorize/pr66616.ll
index d5b2519..1ef614a 100644
--- a/llvm/test/Transforms/LoopVectorize/pr66616.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr66616.ll
@@ -18,41 +18,32 @@ define void @pr66616(ptr %ptr) {
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[PREHEADER:%.*]]
-; CHECK: scalar.ph:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP0]], [[VECTOR_BODY]] ]
; CHECK-NEXT: br label [[LOOP_1:%.*]]
-; CHECK: loop.1:
-; CHECK-NEXT: [[IV_1:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[LOOP_1]] ]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[PTR]], align 4
-; CHECK-NEXT: [[ADD3:%.*]] = add i32 [[LOAD]], 1
-; CHECK-NEXT: [[INC]] = add i8 [[IV_1]], 1
-; CHECK-NEXT: [[COND1:%.*]] = icmp eq i8 [[INC]], 0
-; CHECK-NEXT: br i1 [[COND1]], label [[PREHEADER]], label [[LOOP_1]]
; CHECK: preheader:
-; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = phi i32 [ [[ADD3]], [[LOOP_1]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = sub i32 0, [[ADD3_LCSSA]]
+; CHECK-NEXT: [[TMP4:%.*]] = sub i32 -1, [[DOTLCSSA]]
; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP6]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH1:%.*]], label [[VECTOR_PH2:%.*]]
-; CHECK: vector.ph2:
+; CHECK: vector.ph1:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP6]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP6]], [[N_MOD_VF]]
; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; CHECK-NEXT: [[IND_END:%.*]] = add i32 [[ADD3_LCSSA]], [[DOTCAST]]
+; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP3]], [[DOTCAST]]
; CHECK-NEXT: [[IND_END5:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY3:%.*]]
-; CHECK: vector.body3:
+; CHECK: vector.body2:
; CHECK-NEXT: [[INDEX8:%.*]] = phi i64 [ 0, [[VECTOR_PH2]] ], [ [[INDEX_NEXT9:%.*]], [[VECTOR_BODY3]] ]
; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX8]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK6:%.*]], label [[VECTOR_BODY3]], !llvm.loop [[LOOP3:![0-9]+]]
-; CHECK: middle.block6:
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK5:%.*]], label [[VECTOR_BODY3]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: middle.block5:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP6]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH1]]
-; CHECK: scalar.ph1:
-; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK6]] ], [ [[ADD3_LCSSA]], [[PREHEADER]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END5]], [[MIDDLE_BLOCK6]] ], [ [[PTR]], [[PREHEADER]] ]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[TMP8]], [[MIDDLE_BLOCK5]] ], [ [[TMP3]], [[LOOP_1]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END5]], [[MIDDLE_BLOCK5]] ], [ [[PTR]], [[LOOP_1]] ]
; CHECK-NEXT: br label [[LOOP_2:%.*]]
; CHECK: loop.2:
; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[IV_2_I:%.*]], [[LOOP_2]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH1]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/predicate-switch.ll b/llvm/test/Transforms/LoopVectorize/predicate-switch.ll
index 70428f0..565e203 100644
--- a/llvm/test/Transforms/LoopVectorize/predicate-switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/predicate-switch.ll
@@ -425,20 +425,6 @@ define void @switch_all_to_default(ptr %start) {
; IC1-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IC1: [[MIDDLE_BLOCK]]:
; IC1-NEXT: br label %[[EXIT:.*]]
-; IC1: [[SCALAR_PH:.*]]:
-; IC1-NEXT: br label %[[LOOP_HEADER:.*]]
-; IC1: [[LOOP_HEADER]]:
-; IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; IC1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IC1-NEXT: switch i64 [[IV]], label %[[LOOP_LATCH]] [
-; IC1-NEXT: i64 120, label %[[LOOP_LATCH]]
-; IC1-NEXT: i64 100, label %[[LOOP_LATCH]]
-; IC1-NEXT: ]
-; IC1: [[LOOP_LATCH]]:
-; IC1-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[START]], i64 [[IV]]
-; IC1-NEXT: store i64 42, ptr [[GEP]], align 1
-; IC1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; IC1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]]
; IC1: [[EXIT]]:
; IC1-NEXT: ret void
;
@@ -459,20 +445,6 @@ define void @switch_all_to_default(ptr %start) {
; IC2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IC2: [[MIDDLE_BLOCK]]:
; IC2-NEXT: br label %[[EXIT:.*]]
-; IC2: [[SCALAR_PH:.*]]:
-; IC2-NEXT: br label %[[LOOP_HEADER:.*]]
-; IC2: [[LOOP_HEADER]]:
-; IC2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; IC2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IC2-NEXT: switch i64 [[IV]], label %[[LOOP_LATCH]] [
-; IC2-NEXT: i64 120, label %[[LOOP_LATCH]]
-; IC2-NEXT: i64 100, label %[[LOOP_LATCH]]
-; IC2-NEXT: ]
-; IC2: [[LOOP_LATCH]]:
-; IC2-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[START]], i64 [[IV]]
-; IC2-NEXT: store i64 42, ptr [[GEP]], align 1
-; IC2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; IC2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]]
; IC2: [[EXIT]]:
; IC2-NEXT: ret void
;
@@ -513,21 +485,6 @@ define void @switch_unconditional(ptr %start) {
; IC1-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IC1: [[MIDDLE_BLOCK]]:
; IC1-NEXT: br label %[[EXIT:.*]]
-; IC1: [[SCALAR_PH:.*]]:
-; IC1-NEXT: br label %[[LOOP:.*]]
-; IC1: [[LOOP]]:
-; IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
-; IC1-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[START]], i64 [[IV]]
-; IC1-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4
-; IC1-NEXT: switch i32 [[X]], label %[[FOO:.*]] [
-; IC1-NEXT: ]
-; IC1: [[FOO]]:
-; IC1-NEXT: br label %[[LATCH]]
-; IC1: [[LATCH]]:
-; IC1-NEXT: store i32 0, ptr [[GEP]], align 4
-; IC1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IC1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; IC1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]]
; IC1: [[EXIT]]:
; IC1-NEXT: ret void
;
@@ -548,21 +505,6 @@ define void @switch_unconditional(ptr %start) {
; IC2-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IC2: [[MIDDLE_BLOCK]]:
; IC2-NEXT: br label %[[EXIT:.*]]
-; IC2: [[SCALAR_PH:.*]]:
-; IC2-NEXT: br label %[[LOOP:.*]]
-; IC2: [[LOOP]]:
-; IC2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
-; IC2-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[START]], i64 [[IV]]
-; IC2-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4
-; IC2-NEXT: switch i32 [[X]], label %[[FOO:.*]] [
-; IC2-NEXT: ]
-; IC2: [[FOO]]:
-; IC2-NEXT: br label %[[LATCH]]
-; IC2: [[LATCH]]:
-; IC2-NEXT: store i32 0, ptr [[GEP]], align 4
-; IC2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; IC2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; IC2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]]
; IC2: [[EXIT]]:
; IC2-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
index dfdaaf1..52555d5 100644
--- a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
+++ b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
@@ -58,26 +58,6 @@ define void @loop_invariant_store(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
-; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52
-; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32
-; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[COND_FALSE]]:
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], %[[LOOP_HEADER]] ], [ [[ZEXT]], %[[COND_FALSE]] ]
-; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8
-; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8
-; CHECK-NEXT: store i8 [[TRUNC]], ptr [[P]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV]], 8
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -174,28 +154,6 @@ define void @loop_invariant_srem(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i8 [[IV]], 2
-; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52
-; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32
-; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[COND_FALSE]]:
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], %[[LOOP_HEADER]] ], [ [[ZEXT]], %[[COND_FALSE]] ]
-; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8
-; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8
-; CHECK-NEXT: [[REM:%.*]] = srem i8 [[IV]], [[TRUNC]]
-; CHECK-NEXT: [[GEP_P_REM:%.*]] = getelementptr i32, ptr [[P]], i8 [[REM]]
-; CHECK-NEXT: store i32 4, ptr [[GEP_P_REM]], align 4
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV]], 8
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -245,19 +203,6 @@ define void @loop_invariant_float_store(ptr %p, i32 %a) {
; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
-; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[COND_FALSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: store float [[TMP10]], ptr [[P]], align 4
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp slt i32 [[IV]], 8
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -315,19 +260,6 @@ define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(pt
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
-; CHECK: [[ELSE]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ 1, %[[LOOP_HEADER]] ], [ 0, %[[ELSE]] ]
-; CHECK-NEXT: store i32 [[MERGE]], ptr [[DST]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 3
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll
index 14526af..6542c42 100644
--- a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll
+++ b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll
@@ -27,17 +27,6 @@ define void @_Z3fooPf(ptr %a) {
; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[P:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[P]], 2.000000e+00
-; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret void
;
@@ -58,25 +47,8 @@ define void @_Z3fooPf(ptr %a) {
; DEBUGLOC-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG24]], !llvm.loop [[LOOP25:![0-9]+]]
; DEBUGLOC: [[MIDDLE_BLOCK]]:
; DEBUGLOC-NEXT: br label %[[FOR_END:.*]], !dbg [[DBG24]]
-; DEBUGLOC: [[SCALAR_PH:.*]]:
-; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG18]]
-; DEBUGLOC: [[FOR_BODY]]:
-; DEBUGLOC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], !dbg [[DBG19]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[INDVARS_IV]], [[META9:![0-9]+]], !DIExpression(), [[DBG19]])
-; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]], !dbg [[DBG20]]
-; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META11:![0-9]+]], !DIExpression(), [[DBG20]])
-; DEBUGLOC-NEXT: [[P:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !dbg [[DBG21]]
-; DEBUGLOC-NEXT: #dbg_value(float [[P]], [[META12:![0-9]+]], !DIExpression(), [[DBG21]])
-; DEBUGLOC-NEXT: [[MUL:%.*]] = fmul float [[P]], 2.000000e+00, !dbg [[DBG22]]
-; DEBUGLOC-NEXT: #dbg_value(float [[MUL]], [[META14:![0-9]+]], !DIExpression(), [[DBG22]])
-; DEBUGLOC-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG23]]
-; DEBUGLOC-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1, !dbg [[DBG28:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[INDVARS_IV_NEXT]], [[META15:![0-9]+]], !DIExpression(), [[DBG28]])
-; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024, !dbg [[DBG29:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META16:![0-9]+]], !DIExpression(), [[DBG29]])
-; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !dbg [[DBG24]], !llvm.loop [[LOOP30:![0-9]+]]
; DEBUGLOC: [[FOR_END]]:
-; DEBUGLOC-NEXT: ret void, !dbg [[DBG32:![0-9]+]]
+; DEBUGLOC-NEXT: ret void, !dbg [[DBG28:![0-9]+]]
;
entry:
br label %for.body
@@ -122,7 +94,7 @@ define void @widen_ptr_induction_dbg(ptr %start, ptr %end) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 32
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -134,54 +106,54 @@ define void @widen_ptr_induction_dbg(ptr %start, ptr %end) {
; CHECK-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1
; CHECK-NEXT: store ptr [[IV]], ptr [[IV]], align 1
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]]
-; CHECK-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; DEBUGLOC-LABEL: define void @widen_ptr_induction_dbg(
-; DEBUGLOC-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) !dbg [[DBG33:![0-9]+]] {
+; DEBUGLOC-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) !dbg [[DBG29:![0-9]+]] {
; DEBUGLOC-NEXT: [[ENTRY:.*]]:
-; DEBUGLOC-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64, !dbg [[DBG38:![0-9]+]]
-; DEBUGLOC-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64, !dbg [[DBG38]]
-; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[END1]], -8, !dbg [[DBG38]]
-; DEBUGLOC-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]], !dbg [[DBG38]]
-; DEBUGLOC-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3, !dbg [[DBG38]]
-; DEBUGLOC-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1, !dbg [[DBG38]]
-; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4, !dbg [[DBG38]]
-; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG38]]
+; DEBUGLOC-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64, !dbg [[DBG34:![0-9]+]]
+; DEBUGLOC-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64, !dbg [[DBG34]]
+; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[END1]], -8, !dbg [[DBG34]]
+; DEBUGLOC-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]], !dbg [[DBG34]]
+; DEBUGLOC-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3, !dbg [[DBG34]]
+; DEBUGLOC-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1, !dbg [[DBG34]]
+; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4, !dbg [[DBG34]]
+; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG34]]
; DEBUGLOC: [[VECTOR_PH]]:
; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 4
; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; DEBUGLOC-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 8
; DEBUGLOC-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP4]]
-; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG38]]
+; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG34]]
; DEBUGLOC: [[VECTOR_BODY]]:
; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; DEBUGLOC-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG39:![0-9]+]]
-; DEBUGLOC-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 8, i64 16, i64 24>, !dbg [[DBG39]]
-; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x ptr> [[VECTOR_GEP]], i32 0, !dbg [[DBG40:![0-9]+]]
-; DEBUGLOC-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 1, !dbg [[DBG40]]
+; DEBUGLOC-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG35:![0-9]+]]
+; DEBUGLOC-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 8, i64 16, i64 24>, !dbg [[DBG35]]
+; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x ptr> [[VECTOR_GEP]], i32 0, !dbg [[DBG36:![0-9]+]]
+; DEBUGLOC-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 1, !dbg [[DBG36]]
; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; DEBUGLOC-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 32, !dbg [[DBG39]]
-; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG41:![0-9]+]]
-; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG41]], !llvm.loop [[LOOP42:![0-9]+]]
+; DEBUGLOC-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 32, !dbg [[DBG35]]
+; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG37:![0-9]+]]
+; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG37]], !llvm.loop [[LOOP38:![0-9]+]]
; DEBUGLOC: [[MIDDLE_BLOCK]]:
-; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]], !dbg [[DBG41]]
-; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG41]]
+; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]], !dbg [[DBG37]]
+; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG37]]
; DEBUGLOC: [[SCALAR_PH]]:
-; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ], !dbg [[DBG39]]
-; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG38]]
+; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ], !dbg [[DBG35]]
+; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG34]]
; DEBUGLOC: [[LOOP]]:
-; DEBUGLOC-NEXT: [[IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG39]]
-; DEBUGLOC-NEXT: #dbg_value(ptr [[IV]], [[META35:![0-9]+]], !DIExpression(), [[DBG39]])
-; DEBUGLOC-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1, !dbg [[DBG43:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(ptr [[IV_NEXT]], [[META36:![0-9]+]], !DIExpression(), [[DBG43]])
-; DEBUGLOC-NEXT: store ptr [[IV]], ptr [[IV]], align 1, !dbg [[DBG40]]
-; DEBUGLOC-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]], !dbg [[DBG44:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP_NOT]], [[META37:![0-9]+]], !DIExpression(), [[DBG44]])
-; DEBUGLOC-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG41]], !llvm.loop [[LOOP45:![0-9]+]]
+; DEBUGLOC-NEXT: [[IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG35]]
+; DEBUGLOC-NEXT: #dbg_value(ptr [[IV]], [[META31:![0-9]+]], !DIExpression(), [[DBG35]])
+; DEBUGLOC-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1, !dbg [[DBG39:![0-9]+]]
+; DEBUGLOC-NEXT: #dbg_value(ptr [[IV_NEXT]], [[META32:![0-9]+]], !DIExpression(), [[DBG39]])
+; DEBUGLOC-NEXT: store ptr [[IV]], ptr [[IV]], align 1, !dbg [[DBG36]]
+; DEBUGLOC-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]], !dbg [[DBG40:![0-9]+]]
+; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP_NOT]], [[META33:![0-9]+]], !DIExpression(), [[DBG40]])
+; DEBUGLOC-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG37]], !llvm.loop [[LOOP41:![0-9]+]]
; DEBUGLOC: [[EXIT]]:
-; DEBUGLOC-NEXT: ret void, !dbg [[DBG46:![0-9]+]]
+; DEBUGLOC-NEXT: ret void, !dbg [[DBG42:![0-9]+]]
;
entry:
br label %loop
@@ -254,7 +226,7 @@ define void @predicated_phi_dbg(i64 %n, ptr %x) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
@@ -274,96 +246,96 @@ define void @predicated_phi_dbg(i64 %n, ptr %x) {
; CHECK-NEXT: store i64 [[D]], ptr [[IDX]], align 8
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret void
;
; DEBUGLOC-LABEL: define void @predicated_phi_dbg(
-; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[X:%.*]]) !dbg [[DBG47:![0-9]+]] {
+; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[X:%.*]]) !dbg [[DBG43:![0-9]+]] {
; DEBUGLOC-NEXT: [[ENTRY:.*]]:
-; DEBUGLOC-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1), !dbg [[DBG56:![0-9]+]]
-; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4, !dbg [[DBG56]]
-; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG56]]
+; DEBUGLOC-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1), !dbg [[DBG52:![0-9]+]]
+; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4, !dbg [[DBG52]]
+; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG52]]
; DEBUGLOC: [[VECTOR_PH]]:
; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4
; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]]
-; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG56]]
+; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG52]]
; DEBUGLOC: [[VECTOR_BODY]]:
-; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6:.*]] ], !dbg [[DBG57:![0-9]+]]
-; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6]] ], !dbg [[DBG57]]
-; DEBUGLOC-NEXT: [[TMP0:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 5), !dbg [[DBG58:![0-9]+]]
-; DEBUGLOC-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0, !dbg [[DBG58]]
-; DEBUGLOC-NEXT: br i1 [[TMP1]], label %[[PRED_UDIV_IF:.*]], label %[[PRED_UDIV_CONTINUE:.*]], !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6:.*]] ], !dbg [[DBG53:![0-9]+]]
+; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6]] ], !dbg [[DBG53]]
+; DEBUGLOC-NEXT: [[TMP0:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 5), !dbg [[DBG54:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0, !dbg [[DBG54]]
+; DEBUGLOC-NEXT: br i1 [[TMP1]], label %[[PRED_UDIV_IF:.*]], label %[[PRED_UDIV_CONTINUE:.*]], !dbg [[DBG54]]
; DEBUGLOC: [[PRED_UDIV_IF]]:
-; DEBUGLOC-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0, !dbg [[DBG57]]
-; DEBUGLOC-NEXT: [[TMP3:%.*]] = udiv i64 [[N]], [[TMP2]], !dbg [[DBG59:![0-9]+]]
-; DEBUGLOC-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> poison, i64 [[TMP3]], i32 0, !dbg [[DBG59]]
-; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE]], !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0, !dbg [[DBG53]]
+; DEBUGLOC-NEXT: [[TMP3:%.*]] = udiv i64 [[N]], [[TMP2]], !dbg [[DBG55:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> poison, i64 [[TMP3]], i32 0, !dbg [[DBG55]]
+; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE]], !dbg [[DBG54]]
; DEBUGLOC: [[PRED_UDIV_CONTINUE]]:
-; DEBUGLOC-NEXT: [[TMP5:%.*]] = phi <4 x i64> [ poison, %[[VECTOR_BODY]] ], [ [[TMP4]], %[[PRED_UDIV_IF]] ], !dbg [[DBG59]]
-; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1, !dbg [[DBG58]]
-; DEBUGLOC-NEXT: br i1 [[TMP6]], label %[[PRED_UDIV_IF1:.*]], label %[[PRED_UDIV_CONTINUE2:.*]], !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[TMP5:%.*]] = phi <4 x i64> [ poison, %[[VECTOR_BODY]] ], [ [[TMP4]], %[[PRED_UDIV_IF]] ], !dbg [[DBG55]]
+; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1, !dbg [[DBG54]]
+; DEBUGLOC-NEXT: br i1 [[TMP6]], label %[[PRED_UDIV_IF1:.*]], label %[[PRED_UDIV_CONTINUE2:.*]], !dbg [[DBG54]]
; DEBUGLOC: [[PRED_UDIV_IF1]]:
-; DEBUGLOC-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1, !dbg [[DBG57]]
-; DEBUGLOC-NEXT: [[TMP8:%.*]] = udiv i64 [[N]], [[TMP7]], !dbg [[DBG59]]
-; DEBUGLOC-NEXT: [[TMP9:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[TMP8]], i32 1, !dbg [[DBG59]]
-; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE2]], !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1, !dbg [[DBG53]]
+; DEBUGLOC-NEXT: [[TMP8:%.*]] = udiv i64 [[N]], [[TMP7]], !dbg [[DBG55]]
+; DEBUGLOC-NEXT: [[TMP9:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[TMP8]], i32 1, !dbg [[DBG55]]
+; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE2]], !dbg [[DBG54]]
; DEBUGLOC: [[PRED_UDIV_CONTINUE2]]:
-; DEBUGLOC-NEXT: [[TMP10:%.*]] = phi <4 x i64> [ [[TMP5]], %[[PRED_UDIV_CONTINUE]] ], [ [[TMP9]], %[[PRED_UDIV_IF1]] ], !dbg [[DBG59]]
-; DEBUGLOC-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2, !dbg [[DBG58]]
-; DEBUGLOC-NEXT: br i1 [[TMP11]], label %[[PRED_UDIV_IF3:.*]], label %[[PRED_UDIV_CONTINUE4:.*]], !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[TMP10:%.*]] = phi <4 x i64> [ [[TMP5]], %[[PRED_UDIV_CONTINUE]] ], [ [[TMP9]], %[[PRED_UDIV_IF1]] ], !dbg [[DBG55]]
+; DEBUGLOC-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2, !dbg [[DBG54]]
+; DEBUGLOC-NEXT: br i1 [[TMP11]], label %[[PRED_UDIV_IF3:.*]], label %[[PRED_UDIV_CONTINUE4:.*]], !dbg [[DBG54]]
; DEBUGLOC: [[PRED_UDIV_IF3]]:
-; DEBUGLOC-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2, !dbg [[DBG57]]
-; DEBUGLOC-NEXT: [[TMP13:%.*]] = udiv i64 [[N]], [[TMP12]], !dbg [[DBG59]]
-; DEBUGLOC-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP13]], i32 2, !dbg [[DBG59]]
-; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE4]], !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2, !dbg [[DBG53]]
+; DEBUGLOC-NEXT: [[TMP13:%.*]] = udiv i64 [[N]], [[TMP12]], !dbg [[DBG55]]
+; DEBUGLOC-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP13]], i32 2, !dbg [[DBG55]]
+; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE4]], !dbg [[DBG54]]
; DEBUGLOC: [[PRED_UDIV_CONTINUE4]]:
-; DEBUGLOC-NEXT: [[TMP15:%.*]] = phi <4 x i64> [ [[TMP10]], %[[PRED_UDIV_CONTINUE2]] ], [ [[TMP14]], %[[PRED_UDIV_IF3]] ], !dbg [[DBG59]]
-; DEBUGLOC-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3, !dbg [[DBG58]]
-; DEBUGLOC-NEXT: br i1 [[TMP16]], label %[[PRED_UDIV_IF5:.*]], label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[TMP15:%.*]] = phi <4 x i64> [ [[TMP10]], %[[PRED_UDIV_CONTINUE2]] ], [ [[TMP14]], %[[PRED_UDIV_IF3]] ], !dbg [[DBG55]]
+; DEBUGLOC-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3, !dbg [[DBG54]]
+; DEBUGLOC-NEXT: br i1 [[TMP16]], label %[[PRED_UDIV_IF5:.*]], label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG54]]
; DEBUGLOC: [[PRED_UDIV_IF5]]:
-; DEBUGLOC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 3, !dbg [[DBG57]]
-; DEBUGLOC-NEXT: [[TMP18:%.*]] = udiv i64 [[N]], [[TMP17]], !dbg [[DBG59]]
-; DEBUGLOC-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP18]], i32 3, !dbg [[DBG59]]
-; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 3, !dbg [[DBG53]]
+; DEBUGLOC-NEXT: [[TMP18:%.*]] = udiv i64 [[N]], [[TMP17]], !dbg [[DBG55]]
+; DEBUGLOC-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP18]], i32 3, !dbg [[DBG55]]
+; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG54]]
; DEBUGLOC: [[PRED_UDIV_CONTINUE6]]:
-; DEBUGLOC-NEXT: [[TMP20:%.*]] = phi <4 x i64> [ [[TMP15]], %[[PRED_UDIV_CONTINUE4]] ], [ [[TMP19]], %[[PRED_UDIV_IF5]] ], !dbg [[DBG59]]
-; DEBUGLOC-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP20]], <4 x i64> zeroinitializer, !dbg [[DBG60:![0-9]+]]
-; DEBUGLOC-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[X]], i64 [[INDEX]], !dbg [[DBG61:![0-9]+]]
-; DEBUGLOC-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP21]], align 8, !dbg [[DBG62:![0-9]+]]
-; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG57]]
-; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4), !dbg [[DBG57]]
-; DEBUGLOC-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG63:![0-9]+]]
-; DEBUGLOC-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG63]], !llvm.loop [[LOOP64:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP20:%.*]] = phi <4 x i64> [ [[TMP15]], %[[PRED_UDIV_CONTINUE4]] ], [ [[TMP19]], %[[PRED_UDIV_IF5]] ], !dbg [[DBG55]]
+; DEBUGLOC-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP20]], <4 x i64> zeroinitializer, !dbg [[DBG56:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[X]], i64 [[INDEX]], !dbg [[DBG57:![0-9]+]]
+; DEBUGLOC-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP21]], align 8, !dbg [[DBG58:![0-9]+]]
+; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG53]]
+; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4), !dbg [[DBG53]]
+; DEBUGLOC-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG59:![0-9]+]]
+; DEBUGLOC-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG59]], !llvm.loop [[LOOP60:![0-9]+]]
; DEBUGLOC: [[MIDDLE_BLOCK]]:
-; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]], !dbg [[DBG63]]
-; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]], !dbg [[DBG63]]
+; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]], !dbg [[DBG59]]
+; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]], !dbg [[DBG59]]
; DEBUGLOC: [[SCALAR_PH]]:
-; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], !dbg [[DBG57]]
-; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG56]]
+; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], !dbg [[DBG53]]
+; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG52]]
; DEBUGLOC: [[FOR_BODY]]:
-; DEBUGLOC-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[FOR_INC:.*]] ], !dbg [[DBG57]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[I]], [[META49:![0-9]+]], !DIExpression(), [[DBG57]])
-; DEBUGLOC-NEXT: [[CMP:%.*]] = icmp ult i64 [[I]], 5, !dbg [[DBG58]]
-; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP]], [[META50:![0-9]+]], !DIExpression(), [[DBG58]])
-; DEBUGLOC-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[FOR_INC]], !dbg [[DBG65:![0-9]+]]
+; DEBUGLOC-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[FOR_INC:.*]] ], !dbg [[DBG53]]
+; DEBUGLOC-NEXT: #dbg_value(i64 [[I]], [[META45:![0-9]+]], !DIExpression(), [[DBG53]])
+; DEBUGLOC-NEXT: [[CMP:%.*]] = icmp ult i64 [[I]], 5, !dbg [[DBG54]]
+; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP]], [[META46:![0-9]+]], !DIExpression(), [[DBG54]])
+; DEBUGLOC-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[FOR_INC]], !dbg [[DBG61:![0-9]+]]
; DEBUGLOC: [[IF_THEN]]:
-; DEBUGLOC-NEXT: [[TMP4:%.*]] = udiv i64 [[N]], [[I]], !dbg [[DBG59]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[TMP4]], [[META51:![0-9]+]], !DIExpression(), [[DBG59]])
-; DEBUGLOC-NEXT: br label %[[FOR_INC]], !dbg [[DBG66:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP4:%.*]] = udiv i64 [[N]], [[I]], !dbg [[DBG55]]
+; DEBUGLOC-NEXT: #dbg_value(i64 [[TMP4]], [[META47:![0-9]+]], !DIExpression(), [[DBG55]])
+; DEBUGLOC-NEXT: br label %[[FOR_INC]], !dbg [[DBG62:![0-9]+]]
; DEBUGLOC: [[FOR_INC]]:
-; DEBUGLOC-NEXT: [[D:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[TMP4]], %[[IF_THEN]] ], !dbg [[DBG60]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[D]], [[META52:![0-9]+]], !DIExpression(), [[DBG60]])
-; DEBUGLOC-NEXT: [[IDX:%.*]] = getelementptr i64, ptr [[X]], i64 [[I]], !dbg [[DBG61]]
-; DEBUGLOC-NEXT: #dbg_value(ptr [[IDX]], [[META53:![0-9]+]], !DIExpression(), [[DBG61]])
-; DEBUGLOC-NEXT: store i64 [[D]], ptr [[IDX]], align 8, !dbg [[DBG62]]
-; DEBUGLOC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1, !dbg [[DBG67:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[I_NEXT]], [[META54:![0-9]+]], !DIExpression(), [[DBG67]])
-; DEBUGLOC-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]], !dbg [[DBG68:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i1 [[COND]], [[META55:![0-9]+]], !DIExpression(), [[DBG68]])
-; DEBUGLOC-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !dbg [[DBG63]], !llvm.loop [[LOOP69:![0-9]+]]
+; DEBUGLOC-NEXT: [[D:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[TMP4]], %[[IF_THEN]] ], !dbg [[DBG56]]
+; DEBUGLOC-NEXT: #dbg_value(i64 [[D]], [[META48:![0-9]+]], !DIExpression(), [[DBG56]])
+; DEBUGLOC-NEXT: [[IDX:%.*]] = getelementptr i64, ptr [[X]], i64 [[I]], !dbg [[DBG57]]
+; DEBUGLOC-NEXT: #dbg_value(ptr [[IDX]], [[META49:![0-9]+]], !DIExpression(), [[DBG57]])
+; DEBUGLOC-NEXT: store i64 [[D]], ptr [[IDX]], align 8, !dbg [[DBG58]]
+; DEBUGLOC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1, !dbg [[DBG63:![0-9]+]]
+; DEBUGLOC-NEXT: #dbg_value(i64 [[I_NEXT]], [[META50:![0-9]+]], !DIExpression(), [[DBG63]])
+; DEBUGLOC-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]], !dbg [[DBG64:![0-9]+]]
+; DEBUGLOC-NEXT: #dbg_value(i1 [[COND]], [[META51:![0-9]+]], !DIExpression(), [[DBG64]])
+; DEBUGLOC-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !dbg [[DBG59]], !llvm.loop [[LOOP65:![0-9]+]]
; DEBUGLOC: [[FOR_END]]:
-; DEBUGLOC-NEXT: ret void, !dbg [[DBG70:![0-9]+]]
+; DEBUGLOC-NEXT: ret void, !dbg [[DBG66:![0-9]+]]
;
entry:
br label %for.body
@@ -415,7 +387,7 @@ define void @scalar_cast_dbg(ptr nocapture %a, i32 %start, i64 %k) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -429,57 +401,57 @@ define void @scalar_cast_dbg(ptr nocapture %a, i32 %start, i64 %k) {
; CHECK-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; DEBUGLOC-LABEL: define void @scalar_cast_dbg(
-; DEBUGLOC-SAME: ptr captures(none) [[A:%.*]], i32 [[START:%.*]], i64 [[K:%.*]]) !dbg [[DBG71:![0-9]+]] {
+; DEBUGLOC-SAME: ptr captures(none) [[A:%.*]], i32 [[START:%.*]], i64 [[K:%.*]]) !dbg [[DBG67:![0-9]+]] {
; DEBUGLOC-NEXT: [[ENTRY:.*]]:
-; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K]], 4, !dbg [[DBG78:![0-9]+]]
-; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]], !dbg [[DBG78]]
+; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K]], 4, !dbg [[DBG74:![0-9]+]]
+; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]], !dbg [[DBG74]]
; DEBUGLOC: [[VECTOR_SCEVCHECK]]:
-; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[K]], -1, !dbg [[DBG78]]
-; DEBUGLOC-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32, !dbg [[DBG78]]
-; DEBUGLOC-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0, !dbg [[DBG78]]
-; DEBUGLOC-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[TMP0]], 4294967295, !dbg [[DBG78]]
-; DEBUGLOC-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]], !dbg [[DBG78]]
-; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG79:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[K]], -1, !dbg [[DBG74]]
+; DEBUGLOC-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32, !dbg [[DBG74]]
+; DEBUGLOC-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0, !dbg [[DBG74]]
+; DEBUGLOC-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[TMP0]], 4294967295, !dbg [[DBG74]]
+; DEBUGLOC-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]], !dbg [[DBG74]]
+; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG75:![0-9]+]]
; DEBUGLOC: [[VECTOR_PH]]:
; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[K]], 4
; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[K]], [[N_MOD_VF]]
-; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG79]]
+; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG75]]
; DEBUGLOC: [[VECTOR_BODY]]:
-; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG79]]
-; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG80:![0-9]+]]
-; DEBUGLOC-NEXT: [[TMP5:%.*]] = trunc i64 [[INDEX]] to i32, !dbg [[DBG80]]
-; DEBUGLOC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP5]], !dbg [[DBG81:![0-9]+]]
-; DEBUGLOC-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP6]], align 4, !dbg [[DBG82:![0-9]+]]
-; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG79]]
-; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4), !dbg [[DBG80]]
-; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG83:![0-9]+]]
-; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG83]], !llvm.loop [[LOOP84:![0-9]+]]
+; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG75]]
+; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG76:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP5:%.*]] = trunc i64 [[INDEX]] to i32, !dbg [[DBG76]]
+; DEBUGLOC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP5]], !dbg [[DBG77:![0-9]+]]
+; DEBUGLOC-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP6]], align 4, !dbg [[DBG78:![0-9]+]]
+; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG75]]
+; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4), !dbg [[DBG76]]
+; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG79:![0-9]+]]
+; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG79]], !llvm.loop [[LOOP80:![0-9]+]]
; DEBUGLOC: [[MIDDLE_BLOCK]]:
-; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]], !dbg [[DBG83]]
-; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG83]]
+; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]], !dbg [[DBG79]]
+; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG79]]
; DEBUGLOC: [[SCALAR_PH]]:
-; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], !dbg [[DBG79]]
-; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG78]]
+; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], !dbg [[DBG75]]
+; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG74]]
; DEBUGLOC: [[LOOP]]:
-; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG79]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META73:![0-9]+]], !DIExpression(), [[DBG79]])
-; DEBUGLOC-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32, !dbg [[DBG80]]
-; DEBUGLOC-NEXT: #dbg_value(i32 [[TRUNC_IV]], [[META74:![0-9]+]], !DIExpression(), [[DBG80]])
-; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TRUNC_IV]], !dbg [[DBG81]]
-; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META75:![0-9]+]], !DIExpression(), [[DBG81]])
-; DEBUGLOC-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG82]]
-; DEBUGLOC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1, !dbg [[DBG85:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META76:![0-9]+]], !DIExpression(), [[DBG85]])
-; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]], !dbg [[DBG86:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META77:![0-9]+]], !DIExpression(), [[DBG86]])
-; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG83]], !llvm.loop [[LOOP87:![0-9]+]]
+; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG75]]
+; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META69:![0-9]+]], !DIExpression(), [[DBG75]])
+; DEBUGLOC-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32, !dbg [[DBG76]]
+; DEBUGLOC-NEXT: #dbg_value(i32 [[TRUNC_IV]], [[META70:![0-9]+]], !DIExpression(), [[DBG76]])
+; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TRUNC_IV]], !dbg [[DBG77]]
+; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META71:![0-9]+]], !DIExpression(), [[DBG77]])
+; DEBUGLOC-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG78]]
+; DEBUGLOC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1, !dbg [[DBG81:![0-9]+]]
+; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META72:![0-9]+]], !DIExpression(), [[DBG81]])
+; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]], !dbg [[DBG82:![0-9]+]]
+; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META73:![0-9]+]], !DIExpression(), [[DBG82]])
+; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG79]], !llvm.loop [[LOOP83:![0-9]+]]
; DEBUGLOC: [[EXIT]]:
-; DEBUGLOC-NEXT: ret void, !dbg [[DBG88:![0-9]+]]
+; DEBUGLOC-NEXT: ret void, !dbg [[DBG84:![0-9]+]]
;
entry:
br label %loop
@@ -522,7 +494,7 @@ define void @widen_intrinsic_dbg(i64 %n, ptr %y, ptr %x) {
; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -538,60 +510,60 @@ define void @widen_intrinsic_dbg(i64 %n, ptr %y, ptr %x) {
; CHECK-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
; DEBUGLOC-LABEL: define void @widen_intrinsic_dbg(
-; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[Y:%.*]], ptr [[X:%.*]]) !dbg [[DBG89:![0-9]+]] {
+; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[Y:%.*]], ptr [[X:%.*]]) !dbg [[DBG85:![0-9]+]] {
; DEBUGLOC-NEXT: [[ENTRY:.*]]:
-; DEBUGLOC-NEXT: [[Y2:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG98:![0-9]+]]
-; DEBUGLOC-NEXT: [[X1:%.*]] = ptrtoint ptr [[X]] to i64, !dbg [[DBG98]]
-; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4, !dbg [[DBG98]]
-; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]], !dbg [[DBG98]]
+; DEBUGLOC-NEXT: [[Y2:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG94:![0-9]+]]
+; DEBUGLOC-NEXT: [[X1:%.*]] = ptrtoint ptr [[X]] to i64, !dbg [[DBG94]]
+; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4, !dbg [[DBG94]]
+; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]], !dbg [[DBG94]]
; DEBUGLOC: [[VECTOR_MEMCHECK]]:
-; DEBUGLOC-NEXT: [[TMP0:%.*]] = sub i64 [[X1]], [[Y2]], !dbg [[DBG98]]
-; DEBUGLOC-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16, !dbg [[DBG98]]
-; DEBUGLOC-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG99:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP0:%.*]] = sub i64 [[X1]], [[Y2]], !dbg [[DBG94]]
+; DEBUGLOC-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16, !dbg [[DBG94]]
+; DEBUGLOC-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG95:![0-9]+]]
; DEBUGLOC: [[VECTOR_PH]]:
; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG99]]
+; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG95]]
; DEBUGLOC: [[VECTOR_BODY]]:
-; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG99]]
-; DEBUGLOC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDEX]], !dbg [[DBG100:![0-9]+]]
-; DEBUGLOC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !dbg [[DBG101:![0-9]+]]
-; DEBUGLOC-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[WIDE_LOAD]]), !dbg [[DBG102:![0-9]+]]
-; DEBUGLOC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDEX]], !dbg [[DBG103:![0-9]+]]
-; DEBUGLOC-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4, !dbg [[DBG104:![0-9]+]]
-; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG99]]
-; DEBUGLOC-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG105:![0-9]+]]
-; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG105]], !llvm.loop [[LOOP106:![0-9]+]]
+; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG95]]
+; DEBUGLOC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDEX]], !dbg [[DBG96:![0-9]+]]
+; DEBUGLOC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !dbg [[DBG97:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[WIDE_LOAD]]), !dbg [[DBG98:![0-9]+]]
+; DEBUGLOC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDEX]], !dbg [[DBG99:![0-9]+]]
+; DEBUGLOC-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4, !dbg [[DBG100:![0-9]+]]
+; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG95]]
+; DEBUGLOC-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG101:![0-9]+]]
+; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG101]], !llvm.loop [[LOOP102:![0-9]+]]
; DEBUGLOC: [[MIDDLE_BLOCK]]:
-; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]], !dbg [[DBG105]]
-; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG105]]
+; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]], !dbg [[DBG101]]
+; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG101]]
; DEBUGLOC: [[SCALAR_PH]]:
-; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ], !dbg [[DBG99]]
-; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG98]]
+; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ], !dbg [[DBG95]]
+; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG94]]
; DEBUGLOC: [[LOOP]]:
-; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG99]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META91:![0-9]+]], !DIExpression(), [[DBG99]])
-; DEBUGLOC-NEXT: [[GEP_Y:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[IV]], !dbg [[DBG100]]
-; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_Y]], [[META92:![0-9]+]], !DIExpression(), [[DBG100]])
-; DEBUGLOC-NEXT: [[LOAD:%.*]] = load float, ptr [[GEP_Y]], align 4, !dbg [[DBG101]]
-; DEBUGLOC-NEXT: #dbg_value(float [[LOAD]], [[META93:![0-9]+]], !DIExpression(), [[DBG101]])
-; DEBUGLOC-NEXT: [[CALL:%.*]] = call float @llvm.sqrt.f32(float [[LOAD]]), !dbg [[DBG102]]
-; DEBUGLOC-NEXT: #dbg_value(float [[CALL]], [[META94:![0-9]+]], !DIExpression(), [[DBG102]])
-; DEBUGLOC-NEXT: [[GEP_X:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[IV]], !dbg [[DBG103]]
-; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_X]], [[META95:![0-9]+]], !DIExpression(), [[DBG103]])
-; DEBUGLOC-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4, !dbg [[DBG104]]
-; DEBUGLOC-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG107:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META96:![0-9]+]], !DIExpression(), [[DBG107]])
-; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]], !dbg [[DBG108:![0-9]+]]
-; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META97:![0-9]+]], !DIExpression(), [[DBG108]])
-; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG105]], !llvm.loop [[LOOP109:![0-9]+]]
+; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG95]]
+; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META87:![0-9]+]], !DIExpression(), [[DBG95]])
+; DEBUGLOC-NEXT: [[GEP_Y:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[IV]], !dbg [[DBG96]]
+; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_Y]], [[META88:![0-9]+]], !DIExpression(), [[DBG96]])
+; DEBUGLOC-NEXT: [[LOAD:%.*]] = load float, ptr [[GEP_Y]], align 4, !dbg [[DBG97]]
+; DEBUGLOC-NEXT: #dbg_value(float [[LOAD]], [[META89:![0-9]+]], !DIExpression(), [[DBG97]])
+; DEBUGLOC-NEXT: [[CALL:%.*]] = call float @llvm.sqrt.f32(float [[LOAD]]), !dbg [[DBG98]]
+; DEBUGLOC-NEXT: #dbg_value(float [[CALL]], [[META90:![0-9]+]], !DIExpression(), [[DBG98]])
+; DEBUGLOC-NEXT: [[GEP_X:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[IV]], !dbg [[DBG99]]
+; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_X]], [[META91:![0-9]+]], !DIExpression(), [[DBG99]])
+; DEBUGLOC-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4, !dbg [[DBG100]]
+; DEBUGLOC-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG103:![0-9]+]]
+; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META92:![0-9]+]], !DIExpression(), [[DBG103]])
+; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]], !dbg [[DBG104:![0-9]+]]
+; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META93:![0-9]+]], !DIExpression(), [[DBG104]])
+; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG101]], !llvm.loop [[LOOP105:![0-9]+]]
; DEBUGLOC: [[EXIT]]:
-; DEBUGLOC-NEXT: ret void, !dbg [[DBG110:![0-9]+]]
+; DEBUGLOC-NEXT: ret void, !dbg [[DBG106:![0-9]+]]
;
entry:
br label %loop
@@ -618,23 +590,21 @@ exit:
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
-; CHECK: [[META4]] = !{!"llvm.loop.vectorize.width", i32 4}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
+; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
-; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]}
;.
; DEBUGLOC: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C, file: [[META1:![0-9]+]], producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
; DEBUGLOC: [[META1]] = !DIFile(filename: "{{.*}}<stdin>", directory: {{.*}})
; DEBUGLOC: [[DBG5]] = distinct !DISubprogram(name: "_Z3fooPf", linkageName: "_Z3fooPf", scope: null, file: [[META1]], line: 1, type: [[META6:![0-9]+]], scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META8:![0-9]+]])
; DEBUGLOC: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]])
; DEBUGLOC: [[META7]] = !{}
-; DEBUGLOC: [[META8]] = !{[[META9]], [[META11]], [[META12]], [[META14]], [[META15]], [[META16]]}
+; DEBUGLOC: [[META8]] = !{[[META9:![0-9]+]], [[META11:![0-9]+]], [[META12:![0-9]+]], [[META14:![0-9]+]], [[META15:![0-9]+]], [[META16:![0-9]+]]}
; DEBUGLOC: [[META9]] = !DILocalVariable(name: "1", scope: [[DBG5]], file: [[META1]], line: 2, type: [[META10:![0-9]+]])
; DEBUGLOC: [[META10]] = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned)
; DEBUGLOC: [[META11]] = !DILocalVariable(name: "2", scope: [[DBG5]], file: [[META1]], line: 3, type: [[META10]])
@@ -654,87 +624,83 @@ exit:
; DEBUGLOC: [[LOOP25]] = distinct !{[[LOOP25]], [[META26:![0-9]+]], [[META27:![0-9]+]]}
; DEBUGLOC: [[META26]] = !{!"llvm.loop.isvectorized", i32 1}
; DEBUGLOC: [[META27]] = !{!"llvm.loop.unroll.runtime.disable"}
-; DEBUGLOC: [[DBG28]] = !DILocation(line: 7, column: 1, scope: [[DBG5]])
-; DEBUGLOC: [[DBG29]] = !DILocation(line: 8, column: 1, scope: [[DBG5]])
-; DEBUGLOC: [[LOOP30]] = distinct !{[[LOOP30]], [[META31:![0-9]+]]}
-; DEBUGLOC: [[META31]] = !{!"llvm.loop.vectorize.width", i32 4}
-; DEBUGLOC: [[DBG32]] = !DILocation(line: 10, column: 1, scope: [[DBG5]])
-; DEBUGLOC: [[DBG33]] = distinct !DISubprogram(name: "widen_ptr_induction_dbg", linkageName: "widen_ptr_induction_dbg", scope: null, file: [[META1]], line: 11, type: [[META6]], scopeLine: 11, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META34:![0-9]+]])
-; DEBUGLOC: [[META34]] = !{[[META35]], [[META36]], [[META37]]}
-; DEBUGLOC: [[META35]] = !DILocalVariable(name: "7", scope: [[DBG33]], file: [[META1]], line: 12, type: [[META10]])
-; DEBUGLOC: [[META36]] = !DILocalVariable(name: "8", scope: [[DBG33]], file: [[META1]], line: 13, type: [[META10]])
-; DEBUGLOC: [[META37]] = !DILocalVariable(name: "9", scope: [[DBG33]], file: [[META1]], line: 15, type: [[META17]])
-; DEBUGLOC: [[DBG38]] = !DILocation(line: 11, column: 1, scope: [[DBG33]])
-; DEBUGLOC: [[DBG39]] = !DILocation(line: 12, column: 1, scope: [[DBG33]])
-; DEBUGLOC: [[DBG40]] = !DILocation(line: 14, column: 1, scope: [[DBG33]])
-; DEBUGLOC: [[DBG41]] = !DILocation(line: 16, column: 1, scope: [[DBG33]])
-; DEBUGLOC: [[LOOP42]] = distinct !{[[LOOP42]], [[META26]], [[META27]]}
-; DEBUGLOC: [[DBG43]] = !DILocation(line: 13, column: 1, scope: [[DBG33]])
-; DEBUGLOC: [[DBG44]] = !DILocation(line: 15, column: 1, scope: [[DBG33]])
-; DEBUGLOC: [[LOOP45]] = distinct !{[[LOOP45]], [[META27]], [[META26]]}
-; DEBUGLOC: [[DBG46]] = !DILocation(line: 17, column: 1, scope: [[DBG33]])
-; DEBUGLOC: [[DBG47]] = distinct !DISubprogram(name: "predicated_phi_dbg", linkageName: "predicated_phi_dbg", scope: null, file: [[META1]], line: 18, type: [[META6]], scopeLine: 18, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META48:![0-9]+]])
-; DEBUGLOC: [[META48]] = !{[[META49]], [[META50]], [[META51]], [[META52]], [[META53]], [[META54]], [[META55]]}
-; DEBUGLOC: [[META49]] = !DILocalVariable(name: "10", scope: [[DBG47]], file: [[META1]], line: 19, type: [[META10]])
-; DEBUGLOC: [[META50]] = !DILocalVariable(name: "11", scope: [[DBG47]], file: [[META1]], line: 20, type: [[META17]])
-; DEBUGLOC: [[META51]] = !DILocalVariable(name: "12", scope: [[DBG47]], file: [[META1]], line: 22, type: [[META10]])
-; DEBUGLOC: [[META52]] = !DILocalVariable(name: "13", scope: [[DBG47]], file: [[META1]], line: 24, type: [[META10]])
-; DEBUGLOC: [[META53]] = !DILocalVariable(name: "14", scope: [[DBG47]], file: [[META1]], line: 25, type: [[META10]])
-; DEBUGLOC: [[META54]] = !DILocalVariable(name: "15", scope: [[DBG47]], file: [[META1]], line: 27, type: [[META10]])
-; DEBUGLOC: [[META55]] = !DILocalVariable(name: "16", scope: [[DBG47]], file: [[META1]], line: 28, type: [[META17]])
-; DEBUGLOC: [[DBG56]] = !DILocation(line: 18, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG57]] = !DILocation(line: 19, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG58]] = !DILocation(line: 20, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG59]] = !DILocation(line: 22, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG60]] = !DILocation(line: 24, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG61]] = !DILocation(line: 25, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG62]] = !DILocation(line: 26, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG63]] = !DILocation(line: 29, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[LOOP64]] = distinct !{[[LOOP64]], [[META26]], [[META27]]}
-; DEBUGLOC: [[DBG65]] = !DILocation(line: 21, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG66]] = !DILocation(line: 23, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG67]] = !DILocation(line: 27, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG68]] = !DILocation(line: 28, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[LOOP69]] = distinct !{[[LOOP69]], [[META27]], [[META26]]}
-; DEBUGLOC: [[DBG70]] = !DILocation(line: 30, column: 1, scope: [[DBG47]])
-; DEBUGLOC: [[DBG71]] = distinct !DISubprogram(name: "scalar_cast_dbg", linkageName: "scalar_cast_dbg", scope: null, file: [[META1]], line: 31, type: [[META6]], scopeLine: 31, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META72:![0-9]+]])
-; DEBUGLOC: [[META72]] = !{[[META73]], [[META74]], [[META75]], [[META76]], [[META77]]}
-; DEBUGLOC: [[META73]] = !DILocalVariable(name: "17", scope: [[DBG71]], file: [[META1]], line: 32, type: [[META10]])
-; DEBUGLOC: [[META74]] = !DILocalVariable(name: "18", scope: [[DBG71]], file: [[META1]], line: 33, type: [[META13]])
-; DEBUGLOC: [[META75]] = !DILocalVariable(name: "19", scope: [[DBG71]], file: [[META1]], line: 34, type: [[META10]])
-; DEBUGLOC: [[META76]] = !DILocalVariable(name: "20", scope: [[DBG71]], file: [[META1]], line: 36, type: [[META10]])
-; DEBUGLOC: [[META77]] = !DILocalVariable(name: "21", scope: [[DBG71]], file: [[META1]], line: 37, type: [[META17]])
-; DEBUGLOC: [[DBG78]] = !DILocation(line: 31, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[DBG79]] = !DILocation(line: 32, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[DBG80]] = !DILocation(line: 33, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[DBG81]] = !DILocation(line: 34, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[DBG82]] = !DILocation(line: 35, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[DBG83]] = !DILocation(line: 38, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[LOOP84]] = distinct !{[[LOOP84]], [[META26]], [[META27]]}
-; DEBUGLOC: [[DBG85]] = !DILocation(line: 36, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[DBG86]] = !DILocation(line: 37, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[LOOP87]] = distinct !{[[LOOP87]], [[META26]]}
-; DEBUGLOC: [[DBG88]] = !DILocation(line: 39, column: 1, scope: [[DBG71]])
-; DEBUGLOC: [[DBG89]] = distinct !DISubprogram(name: "widen_intrinsic_dbg", linkageName: "widen_intrinsic_dbg", scope: null, file: [[META1]], line: 40, type: [[META6]], scopeLine: 40, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META90:![0-9]+]])
-; DEBUGLOC: [[META90]] = !{[[META91]], [[META92]], [[META93]], [[META94]], [[META95]], [[META96]], [[META97]]}
-; DEBUGLOC: [[META91]] = !DILocalVariable(name: "22", scope: [[DBG89]], file: [[META1]], line: 41, type: [[META10]])
-; DEBUGLOC: [[META92]] = !DILocalVariable(name: "23", scope: [[DBG89]], file: [[META1]], line: 42, type: [[META10]])
-; DEBUGLOC: [[META93]] = !DILocalVariable(name: "24", scope: [[DBG89]], file: [[META1]], line: 43, type: [[META13]])
-; DEBUGLOC: [[META94]] = !DILocalVariable(name: "25", scope: [[DBG89]], file: [[META1]], line: 44, type: [[META13]])
-; DEBUGLOC: [[META95]] = !DILocalVariable(name: "26", scope: [[DBG89]], file: [[META1]], line: 45, type: [[META10]])
-; DEBUGLOC: [[META96]] = !DILocalVariable(name: "27", scope: [[DBG89]], file: [[META1]], line: 47, type: [[META10]])
-; DEBUGLOC: [[META97]] = !DILocalVariable(name: "28", scope: [[DBG89]], file: [[META1]], line: 48, type: [[META17]])
-; DEBUGLOC: [[DBG98]] = !DILocation(line: 40, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[DBG99]] = !DILocation(line: 41, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[DBG100]] = !DILocation(line: 42, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[DBG101]] = !DILocation(line: 43, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[DBG102]] = !DILocation(line: 44, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[DBG103]] = !DILocation(line: 45, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[DBG104]] = !DILocation(line: 46, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[DBG105]] = !DILocation(line: 49, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[LOOP106]] = distinct !{[[LOOP106]], [[META26]], [[META27]]}
-; DEBUGLOC: [[DBG107]] = !DILocation(line: 47, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[DBG108]] = !DILocation(line: 48, column: 1, scope: [[DBG89]])
-; DEBUGLOC: [[LOOP109]] = distinct !{[[LOOP109]], [[META26]]}
-; DEBUGLOC: [[DBG110]] = !DILocation(line: 50, column: 1, scope: [[DBG89]])
+; DEBUGLOC: [[DBG28]] = !DILocation(line: 10, column: 1, scope: [[DBG5]])
+; DEBUGLOC: [[DBG29]] = distinct !DISubprogram(name: "widen_ptr_induction_dbg", linkageName: "widen_ptr_induction_dbg", scope: null, file: [[META1]], line: 11, type: [[META6]], scopeLine: 11, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META30:![0-9]+]])
+; DEBUGLOC: [[META30]] = !{[[META31]], [[META32]], [[META33]]}
+; DEBUGLOC: [[META31]] = !DILocalVariable(name: "7", scope: [[DBG29]], file: [[META1]], line: 12, type: [[META10]])
+; DEBUGLOC: [[META32]] = !DILocalVariable(name: "8", scope: [[DBG29]], file: [[META1]], line: 13, type: [[META10]])
+; DEBUGLOC: [[META33]] = !DILocalVariable(name: "9", scope: [[DBG29]], file: [[META1]], line: 15, type: [[META17]])
+; DEBUGLOC: [[DBG34]] = !DILocation(line: 11, column: 1, scope: [[DBG29]])
+; DEBUGLOC: [[DBG35]] = !DILocation(line: 12, column: 1, scope: [[DBG29]])
+; DEBUGLOC: [[DBG36]] = !DILocation(line: 14, column: 1, scope: [[DBG29]])
+; DEBUGLOC: [[DBG37]] = !DILocation(line: 16, column: 1, scope: [[DBG29]])
+; DEBUGLOC: [[LOOP38]] = distinct !{[[LOOP38]], [[META26]], [[META27]]}
+; DEBUGLOC: [[DBG39]] = !DILocation(line: 13, column: 1, scope: [[DBG29]])
+; DEBUGLOC: [[DBG40]] = !DILocation(line: 15, column: 1, scope: [[DBG29]])
+; DEBUGLOC: [[LOOP41]] = distinct !{[[LOOP41]], [[META27]], [[META26]]}
+; DEBUGLOC: [[DBG42]] = !DILocation(line: 17, column: 1, scope: [[DBG29]])
+; DEBUGLOC: [[DBG43]] = distinct !DISubprogram(name: "predicated_phi_dbg", linkageName: "predicated_phi_dbg", scope: null, file: [[META1]], line: 18, type: [[META6]], scopeLine: 18, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META44:![0-9]+]])
+; DEBUGLOC: [[META44]] = !{[[META45]], [[META46]], [[META47]], [[META48]], [[META49]], [[META50]], [[META51]]}
+; DEBUGLOC: [[META45]] = !DILocalVariable(name: "10", scope: [[DBG43]], file: [[META1]], line: 19, type: [[META10]])
+; DEBUGLOC: [[META46]] = !DILocalVariable(name: "11", scope: [[DBG43]], file: [[META1]], line: 20, type: [[META17]])
+; DEBUGLOC: [[META47]] = !DILocalVariable(name: "12", scope: [[DBG43]], file: [[META1]], line: 22, type: [[META10]])
+; DEBUGLOC: [[META48]] = !DILocalVariable(name: "13", scope: [[DBG43]], file: [[META1]], line: 24, type: [[META10]])
+; DEBUGLOC: [[META49]] = !DILocalVariable(name: "14", scope: [[DBG43]], file: [[META1]], line: 25, type: [[META10]])
+; DEBUGLOC: [[META50]] = !DILocalVariable(name: "15", scope: [[DBG43]], file: [[META1]], line: 27, type: [[META10]])
+; DEBUGLOC: [[META51]] = !DILocalVariable(name: "16", scope: [[DBG43]], file: [[META1]], line: 28, type: [[META17]])
+; DEBUGLOC: [[DBG52]] = !DILocation(line: 18, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG53]] = !DILocation(line: 19, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG54]] = !DILocation(line: 20, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG55]] = !DILocation(line: 22, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG56]] = !DILocation(line: 24, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG57]] = !DILocation(line: 25, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG58]] = !DILocation(line: 26, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG59]] = !DILocation(line: 29, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[LOOP60]] = distinct !{[[LOOP60]], [[META26]], [[META27]]}
+; DEBUGLOC: [[DBG61]] = !DILocation(line: 21, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG62]] = !DILocation(line: 23, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG63]] = !DILocation(line: 27, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG64]] = !DILocation(line: 28, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[LOOP65]] = distinct !{[[LOOP65]], [[META27]], [[META26]]}
+; DEBUGLOC: [[DBG66]] = !DILocation(line: 30, column: 1, scope: [[DBG43]])
+; DEBUGLOC: [[DBG67]] = distinct !DISubprogram(name: "scalar_cast_dbg", linkageName: "scalar_cast_dbg", scope: null, file: [[META1]], line: 31, type: [[META6]], scopeLine: 31, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META68:![0-9]+]])
+; DEBUGLOC: [[META68]] = !{[[META69]], [[META70]], [[META71]], [[META72]], [[META73]]}
+; DEBUGLOC: [[META69]] = !DILocalVariable(name: "17", scope: [[DBG67]], file: [[META1]], line: 32, type: [[META10]])
+; DEBUGLOC: [[META70]] = !DILocalVariable(name: "18", scope: [[DBG67]], file: [[META1]], line: 33, type: [[META13]])
+; DEBUGLOC: [[META71]] = !DILocalVariable(name: "19", scope: [[DBG67]], file: [[META1]], line: 34, type: [[META10]])
+; DEBUGLOC: [[META72]] = !DILocalVariable(name: "20", scope: [[DBG67]], file: [[META1]], line: 36, type: [[META10]])
+; DEBUGLOC: [[META73]] = !DILocalVariable(name: "21", scope: [[DBG67]], file: [[META1]], line: 37, type: [[META17]])
+; DEBUGLOC: [[DBG74]] = !DILocation(line: 31, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[DBG75]] = !DILocation(line: 32, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[DBG76]] = !DILocation(line: 33, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[DBG77]] = !DILocation(line: 34, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[DBG78]] = !DILocation(line: 35, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[DBG79]] = !DILocation(line: 38, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[LOOP80]] = distinct !{[[LOOP80]], [[META26]], [[META27]]}
+; DEBUGLOC: [[DBG81]] = !DILocation(line: 36, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[DBG82]] = !DILocation(line: 37, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[LOOP83]] = distinct !{[[LOOP83]], [[META26]]}
+; DEBUGLOC: [[DBG84]] = !DILocation(line: 39, column: 1, scope: [[DBG67]])
+; DEBUGLOC: [[DBG85]] = distinct !DISubprogram(name: "widen_intrinsic_dbg", linkageName: "widen_intrinsic_dbg", scope: null, file: [[META1]], line: 40, type: [[META6]], scopeLine: 40, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META86:![0-9]+]])
+; DEBUGLOC: [[META86]] = !{[[META87]], [[META88]], [[META89]], [[META90]], [[META91]], [[META92]], [[META93]]}
+; DEBUGLOC: [[META87]] = !DILocalVariable(name: "22", scope: [[DBG85]], file: [[META1]], line: 41, type: [[META10]])
+; DEBUGLOC: [[META88]] = !DILocalVariable(name: "23", scope: [[DBG85]], file: [[META1]], line: 42, type: [[META10]])
+; DEBUGLOC: [[META89]] = !DILocalVariable(name: "24", scope: [[DBG85]], file: [[META1]], line: 43, type: [[META13]])
+; DEBUGLOC: [[META90]] = !DILocalVariable(name: "25", scope: [[DBG85]], file: [[META1]], line: 44, type: [[META13]])
+; DEBUGLOC: [[META91]] = !DILocalVariable(name: "26", scope: [[DBG85]], file: [[META1]], line: 45, type: [[META10]])
+; DEBUGLOC: [[META92]] = !DILocalVariable(name: "27", scope: [[DBG85]], file: [[META1]], line: 47, type: [[META10]])
+; DEBUGLOC: [[META93]] = !DILocalVariable(name: "28", scope: [[DBG85]], file: [[META1]], line: 48, type: [[META17]])
+; DEBUGLOC: [[DBG94]] = !DILocation(line: 40, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[DBG95]] = !DILocation(line: 41, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[DBG96]] = !DILocation(line: 42, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[DBG97]] = !DILocation(line: 43, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[DBG98]] = !DILocation(line: 44, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[DBG99]] = !DILocation(line: 45, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[DBG100]] = !DILocation(line: 46, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[DBG101]] = !DILocation(line: 49, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[LOOP102]] = distinct !{[[LOOP102]], [[META26]], [[META27]]}
+; DEBUGLOC: [[DBG103]] = !DILocation(line: 47, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[DBG104]] = !DILocation(line: 48, column: 1, scope: [[DBG85]])
+; DEBUGLOC: [[LOOP105]] = distinct !{[[LOOP105]], [[META26]]}
+; DEBUGLOC: [[DBG106]] = !DILocation(line: 50, column: 1, scope: [[DBG85]])
;.
diff --git a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll
index 57f0dc2..787fa31 100644
--- a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll
@@ -22,7 +22,7 @@ loop:
%load = load i32, ptr %gep, align 4
%red.next = add i32 %red, %load
%iv.next = add i64 %iv, 1
- %exitcond = icmp eq i64 %iv.next, 256
+ %exitcond = icmp eq i64 %iv.next, 257
br i1 %exitcond, label %exit, label %loop
exit:
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll
index f20d492..73ddddc 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll
@@ -20,10 +20,6 @@ define i32 @reduction_smin(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[RDX_MINMAX]]
;
@@ -66,10 +62,6 @@ define i32 @reduction_smin_select_ops_flipped(ptr nocapture %A, ptr nocapture %B
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[RDX_MINMAX]]
;
@@ -111,10 +103,6 @@ define i32 @reduction_smin_intrinsic(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: ret i32 [[TMP3]]
@@ -159,10 +147,6 @@ define i32 @reduction_umax(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[RDX_MINMAX]]
;
@@ -205,10 +189,6 @@ define i32 @reduction_umax_select_ops_flipped(ptr nocapture %A, ptr nocapture %B
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[RDX_MINMAX]]
;
@@ -250,10 +230,6 @@ define i32 @reduction_umax_intrinsic(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT: ret i32 [[TMP3]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
index 925290b..1b9dcad 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
@@ -61,11 +61,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 [[TMP26]]
;
@@ -170,11 +166,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 [[TMP48]]
;
@@ -263,11 +255,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 [[TMP29]]
;
@@ -373,11 +361,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 [[TMP48]]
;
@@ -485,11 +469,7 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP47]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 [[TMP46]]
;
@@ -594,11 +574,7 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 [[TMP45]]
;
@@ -701,11 +677,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[TMP45]]
;
@@ -806,11 +778,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[TMP43]]
;
@@ -911,11 +879,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[TMP43]]
;
@@ -1016,11 +980,7 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret float [[TMP43]]
;
@@ -1123,11 +1083,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret float [[TMP45]]
;
@@ -1211,11 +1167,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[RDX_MINMAX]]
;
@@ -1297,11 +1249,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[RDX_MINMAX]]
;
@@ -1356,21 +1304,7 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: br i1 poison, label [[IF_THEN8:%.*]], label [[IF_ELSE:%.*]]
-; CHECK: if.then8:
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: if.else:
-; CHECK-NEXT: br i1 poison, label [[IF_THEN16:%.*]], label [[FOR_INC]]
-; CHECK: if.then16:
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: for.inc:
-; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]]
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]])
; CHECK-NEXT: ret float [[SUM_1_LCSSA]]
@@ -1478,11 +1412,7 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[TMP32:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP30]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[TMP33:%.*]] = trunc <4 x i32> [[TMP32]] to <4 x i8>
@@ -1572,11 +1502,7 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[TMP31:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP29]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[TMP32:%.*]] = trunc <4 x i32> [[TMP31]] to <4 x i8>
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
index cad3ca1..183462f 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
@@ -35,11 +35,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP7]], [[TMP5]]
; CHECK-NEXT: [[BIN_RDX7:%.*]] = add i32 [[TMP9]], [[BIN_RDX]]
@@ -114,11 +110,7 @@ define i64 @reduction_sum_chain(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add i64 [[TMP19]], [[TMP17]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add i64 [[TMP21]], [[BIN_RDX]]
@@ -345,11 +337,7 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP111:%.*]] = icmp eq i64 [[INDEX_NEXT]], 272
; CHECK-NEXT: br i1 [[TMP111]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP104]], [[TMP101]]
; CHECK-NEXT: [[BIN_RDX34:%.*]] = add i32 [[TMP107]], [[BIN_RDX]]
@@ -581,17 +569,9 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP119:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP119]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP119]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: for.inc:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5]]
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.end:
; CHECK-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP112]], [[TMP109]]
; CHECK-NEXT: [[BIN_RDX36:%.*]] = mul i32 [[TMP115]], [[BIN_RDX]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index f4d4cca..ec7fde8 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -23,21 +23,8 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-NEXT: [[L7]] = add i32 [[SUM_02]], [[L3]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP2]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_single(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) {
@@ -61,22 +48,9 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP5]], [[TMP3]]
+; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP5]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L7]] = add i32 [[SUM_02]], [[L3]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
@@ -125,26 +99,8 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]]
-; CHECK-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]]
-; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L5]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP7]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
@@ -183,27 +139,9 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP15]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP15]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]]
-; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]]
-; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L5]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
@@ -251,22 +189,8 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L3]]
-; CHECK-NEXT: [[L9]] = add i32 [[L7]], 3
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP3]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_const(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) {
@@ -294,23 +218,9 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP7]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP7]], [[TMP6]]
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L3]]
-; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L7]], 3
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
@@ -360,26 +270,8 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[PROD_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 1, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-NEXT: [[L7:%.*]] = mul i32 [[PROD_02]], [[L6]]
-; CHECK-NEXT: [[L8:%.*]] = mul i32 [[L7]], [[L3]]
-; CHECK-NEXT: [[L9]] = mul i32 [[L8]], [[L5]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP7]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_prod(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
@@ -418,27 +310,9 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP15]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[PROD_0_LCSSA:%.*]] = mul i32 [[TMP15]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[PROD_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 1, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = mul i32 [[PROD_02]], [[L6]]
-; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = mul i32 [[L7]], [[L3]]
-; CHECK-INTERLEAVED-NEXT: [[L9]] = mul i32 [[L8]], [[L5]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[PROD_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[PROD_0_LCSSA]]
;
entry:
@@ -491,26 +365,8 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-NEXT: [[L6:%.*]] = mul nsw i32 [[L5]], [[L3]]
-; CHECK-NEXT: [[L7:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-NEXT: [[L8:%.*]] = add i32 [[SUM_02]], [[L7]]
-; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L6]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP6]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_mix(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
@@ -547,27 +403,9 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP10]]
+; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP13]], [[TMP10]]
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = mul nsw i32 [[L5]], [[L3]]
-; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[SUM_02]], [[L7]]
-; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L6]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
@@ -617,24 +455,8 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 19, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-NEXT: [[L6:%.*]] = mul i32 [[SUM_02]], [[L3]]
-; CHECK-NEXT: [[L7]] = mul i32 [[L6]], [[L5]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP5]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_mul(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
@@ -666,25 +488,9 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP11]], [[TMP9]]
+; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = mul i32 [[TMP11]], [[TMP9]]
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 19, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = mul i32 [[SUM_02]], [[L3]]
-; CHECK-INTERLEAVED-NEXT: [[L7]] = mul i32 [[L6]], [[L5]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
@@ -731,24 +537,8 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_09:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 120, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[COEFF]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[L1]], [[L0]]
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP4]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @start_at_non_zero(
; CHECK-INTERLEAVED-SAME: ptr captures(none) [[IN:%.*]], ptr captures(none) [[COEFF:%.*]], ptr captures(none) [[OUT:%.*]]) {
@@ -780,24 +570,8 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP9]], [[TMP6]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_09:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 120, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[COEFF]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul nsw i32 [[L1]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
-; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]]
;
entry:
br label %for.body
@@ -844,24 +618,8 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[AND:%.*]], %[[FOR_BODY]] ], [ -1, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = and i32 [[RESULT_08]], [[L0]]
-; CHECK-NEXT: [[AND]] = and i32 [[ADD]], [[L1]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP5]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_and(
; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) {
@@ -893,25 +651,9 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = and i32 [[TMP11]], [[TMP9]]
+; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = and i32 [[TMP11]], [[TMP9]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[AND:%.*]], %[[FOR_BODY]] ], [ -1, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = and i32 [[RESULT_08]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[AND]] = and i32 [[ADD]], [[L1]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
@@ -958,24 +700,8 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[OR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]]
-; CHECK-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP4]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_or(
; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) {
@@ -1005,25 +731,9 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = or i32 [[TMP9]], [[TMP7]]
+; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = or i32 [[TMP9]], [[TMP7]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[OR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
@@ -1070,24 +780,8 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[XOR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]]
-; CHECK-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP4]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_xor(
; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) {
@@ -1117,25 +811,9 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = xor i32 [[TMP9]], [[TMP7]]
+; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = xor i32 [[TMP9]], [[TMP7]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[XOR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
@@ -1183,24 +861,8 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[RESULT_08:%.*]] = phi float [ [[FADD:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[RESULT_08]], [[L0]]
-; CHECK-NEXT: [[FADD]] = fadd fast float [[ADD]], [[L1]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FADD]], %[[FOR_BODY]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[RESULT_0_LCSSA]]
+; CHECK-NEXT: ret float [[TMP3]]
;
; CHECK-INTERLEAVED-LABEL: define float @reduction_fadd(
; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) {
@@ -1232,25 +894,9 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd fast float [[TMP7]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = fadd fast float [[TMP7]], [[TMP6]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi float [ [[FADD:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fadd fast float [[RESULT_08]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[FADD]] = fadd fast float [[ADD]], [[L1]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret float [[RESULT_0_LCSSA]]
;
entry:
@@ -1298,24 +944,8 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[RESULT_08:%.*]] = phi float [ [[FMUL:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = fmul fast float [[RESULT_08]], [[L0]]
-; CHECK-NEXT: [[FMUL]] = fmul fast float [[ADD]], [[L1]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FMUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[RESULT_0_LCSSA]]
+; CHECK-NEXT: ret float [[TMP5]]
;
; CHECK-INTERLEAVED-LABEL: define float @reduction_fmul(
; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) {
@@ -1347,25 +977,9 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fmul fast float [[TMP11]], [[TMP9]]
+; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = fmul fast float [[TMP11]], [[TMP9]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi float [ [[FMUL:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fmul fast float [[RESULT_08]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[FMUL]] = fmul fast float [[ADD]], [[L1]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FMUL]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret float [[RESULT_0_LCSSA]]
;
entry:
@@ -1410,21 +1024,8 @@ define i32 @reduction_sub_lhs(ptr noalias nocapture %A) {
; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 3, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[L0]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[X_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP5]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sub_lhs(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) {
@@ -1450,21 +1051,8 @@ define i32 @reduction_sub_lhs(ptr noalias nocapture %A) {
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP5]], [[TMP7]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 3, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
-; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]]
+; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]]
;
entry:
br label %for.body
@@ -1519,38 +1107,8 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]])
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_INC:.*]] ]
-; CHECK-NEXT: [[SUM_033:%.*]] = phi float [ [[S]], %[[SCALAR_PH]] ], [ [[SUM_1:%.*]], %[[FOR_INC]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[CMP3:%.*]] = fcmp ogt float [[L0]], [[L1]]
-; CHECK-NEXT: br i1 [[CMP3]], label %[[IF_THEN:.*]], label %[[FOR_INC]]
-; CHECK: [[IF_THEN]]:
-; CHECK-NEXT: [[CMP6:%.*]] = fcmp ogt float [[L1]], 1.000000e+00
-; CHECK-NEXT: br i1 [[CMP6]], label %[[IF_THEN8:.*]], label %[[IF_ELSE:.*]]
-; CHECK: [[IF_THEN8]]:
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[SUM_033]], [[L0]]
-; CHECK-NEXT: br label %[[FOR_INC]]
-; CHECK: [[IF_ELSE]]:
-; CHECK-NEXT: [[CMP14:%.*]] = fcmp ogt float [[L0]], 2.000000e+00
-; CHECK-NEXT: br i1 [[CMP14]], label %[[IF_THEN16:.*]], label %[[FOR_INC]]
-; CHECK: [[IF_THEN16]]:
-; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[SUM_033]], [[L1]]
-; CHECK-NEXT: br label %[[FOR_INC]]
-; CHECK: [[FOR_INC]]:
-; CHECK-NEXT: [[SUM_1]] = phi float [ [[ADD]], %[[IF_THEN8]] ], [ [[ADD19]], %[[IF_THEN16]] ], [ [[SUM_033]], %[[IF_ELSE]] ], [ [[SUM_033]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_END]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi float [ [[SUM_1]], %[[FOR_INC]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[SUM_1_LCSSA]]
+; CHECK-NEXT: ret float [[TMP13]]
;
; CHECK-INTERLEAVED-LABEL: define float @reduction_conditional(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], float [[S:%.*]]) {
@@ -1602,38 +1160,8 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[PREDPHI9]], [[PREDPHI6]]
; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_INC:.*]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_033:%.*]] = phi float [ [[S]], %[[SCALAR_PH]] ], [ [[SUM_1:%.*]], %[[FOR_INC]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[CMP3:%.*]] = fcmp ogt float [[L0]], [[L1]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[CMP3]], label %[[IF_THEN:.*]], label %[[FOR_INC]]
-; CHECK-INTERLEAVED: [[IF_THEN]]:
-; CHECK-INTERLEAVED-NEXT: [[CMP6:%.*]] = fcmp ogt float [[L1]], 1.000000e+00
-; CHECK-INTERLEAVED-NEXT: br i1 [[CMP6]], label %[[IF_THEN8:.*]], label %[[IF_ELSE:.*]]
-; CHECK-INTERLEAVED: [[IF_THEN8]]:
-; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fadd fast float [[SUM_033]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]]
-; CHECK-INTERLEAVED: [[IF_ELSE]]:
-; CHECK-INTERLEAVED-NEXT: [[CMP14:%.*]] = fcmp ogt float [[L0]], 2.000000e+00
-; CHECK-INTERLEAVED-NEXT: br i1 [[CMP14]], label %[[IF_THEN16:.*]], label %[[FOR_INC]]
-; CHECK-INTERLEAVED: [[IF_THEN16]]:
-; CHECK-INTERLEAVED-NEXT: [[ADD19:%.*]] = fadd fast float [[SUM_033]], [[L1]]
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]]
-; CHECK-INTERLEAVED: [[FOR_INC]]:
-; CHECK-INTERLEAVED-NEXT: [[SUM_1]] = phi float [ [[ADD]], %[[IF_THEN8]] ], [ [[ADD19]], %[[IF_THEN16]] ], [ [[SUM_033]], %[[IF_ELSE]] ], [ [[SUM_033]], %[[FOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_END]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[SUM_1_LCSSA:%.*]] = phi float [ [[SUM_1]], %[[FOR_INC]] ], [ [[TMP24]], %[[MIDDLE_BLOCK]] ]
-; CHECK-INTERLEAVED-NEXT: ret float [[SUM_1_LCSSA]]
+; CHECK-INTERLEAVED-NEXT: ret float [[TMP24]]
;
entry:
br label %for.body
@@ -1679,11 +1207,11 @@ for.end:
define i32 @reduction_sum_multiuse(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-LABEL: define i32 @reduction_sum_multiuse(
; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[_LR_PH1:.*]]:
; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ]
+; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ]
; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
@@ -1703,11 +1231,11 @@ define i32 @reduction_sum_multiuse(ptr noalias nocapture %A, ptr noalias nocaptu
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_multiuse(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
-; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*]]:
+; CHECK-INTERLEAVED-NEXT: [[_LR_PH1:.*]]:
; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ]
+; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ]
+; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ]
; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
@@ -1778,26 +1306,8 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]]
-; CHECK-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]]
-; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L5]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP7]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_predicated(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
@@ -1836,27 +1346,9 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP15]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP15]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]]
-; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]]
-; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L5]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
@@ -1902,27 +1394,13 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP5]] = zext <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP7:%.*]] = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> [[TMP4]])
; CHECK-NEXT: [[TMP8:%.*]] = zext i8 [[TMP7]] to i32
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4
-; CHECK-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32
-; CHECK-NEXT: [[L9]] = add i32 [[SUM_02]], [[L3E]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8
+; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP8]] to i8
; CHECK-NEXT: ret i8 [[SUM_0_LCSSA]]
;
; CHECK-INTERLEAVED-LABEL: define i8 @reduction_add_trunc(
@@ -1951,28 +1429,14 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) {
; CHECK-INTERLEAVED-NEXT: [[TMP11]] = zext <4 x i8> [[TMP9]] to <4 x i32>
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i8> [[TMP9]], [[TMP8]]
; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext i8 [[TMP13]] to i32
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32
-; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[SUM_02]], [[L3E]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8
+; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP14]] to i8
; CHECK-INTERLEAVED-NEXT: ret i8 [[SUM_0_LCSSA]]
;
entry:
@@ -2016,27 +1480,13 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP5]] = zext <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP7:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> [[TMP4]])
; CHECK-NEXT: [[TMP8:%.*]] = zext i8 [[TMP7]] to i32
; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK: [[_LR_PH:.*:]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255
-; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]]
-; CHECK-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4
-; CHECK-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32
-; CHECK-NEXT: [[L9]] = and i32 [[SUM_02]], [[L3E]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK: [[__CRIT_EDGE:.*:]]
-; CHECK-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8
+; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP8]] to i8
; CHECK-NEXT: ret i8 [[SUM_0_LCSSA]]
;
; CHECK-INTERLEAVED-LABEL: define i8 @reduction_and_trunc(
@@ -2065,28 +1515,14 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) {
; CHECK-INTERLEAVED-NEXT: [[TMP11]] = zext <4 x i8> [[TMP9]] to <4 x i32>
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = and <4 x i8> [[TMP9]], [[TMP8]]
; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext i8 [[TMP13]] to i32
; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-INTERLEAVED: [[_LR_PH:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255
-; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32
-; CHECK-INTERLEAVED-NEXT: [[L9]] = and i32 [[SUM_02]], [[L3E]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]]
; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ]
-; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8
+; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP14]] to i8
; CHECK-INTERLEAVED-NEXT: ret i8 [[SUM_0_LCSSA]]
;
entry:
@@ -2133,7 +1569,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[TMP4]] = fadd float [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
@@ -2151,7 +1587,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP6]], float [[TMP7]], float [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[MULADD_LCSSA]]
@@ -2185,7 +1621,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-INTERLEAVED-NEXT: [[TMP9]] = fadd float [[VEC_PHI1]], [[TMP8]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd float [[TMP9]], [[TMP7]]
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2204,7 +1640,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-INTERLEAVED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]])
; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-INTERLEAVED: [[FOR_END]]:
; CHECK-INTERLEAVED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret float [[MULADD_LCSSA]]
@@ -2373,7 +1809,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) {
; CHECK-NEXT: [[TMP7]] = fadd float [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -2388,17 +1824,17 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) {
; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: br i1 [[C]], label %[[FOO:.*]], label %[[BAR:.*]]
-; CHECK: [[FOO]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]]
+; CHECK: [[IF]]:
; CHECK-NEXT: br label %[[LATCH]]
-; CHECK: [[BAR]]:
+; CHECK: [[ELSE]]:
; CHECK-NEXT: [[MULADD:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP9]], float [[TMP10]], float [[SUM]])
; CHECK-NEXT: br label %[[LATCH]]
; CHECK: [[LATCH]]:
-; CHECK-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[FOO]] ], [ [[MULADD]], %[[BAR]] ]
+; CHECK-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[IF]] ], [ [[MULADD]], %[[ELSE]] ]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi float [ [[SUM_NEXT]], %[[LATCH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[SUM_NEXT_LCSSA]]
@@ -2437,7 +1873,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) {
; CHECK-INTERLEAVED-NEXT: [[TMP13]] = fadd float [[VEC_PHI1]], [[TMP12]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd float [[TMP13]], [[TMP10]]
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2463,7 +1899,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) {
; CHECK-INTERLEAVED-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[IF]] ], [ [[MULADD]], %[[ELSE]] ]
; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK-INTERLEAVED: [[EXIT]]:
; CHECK-INTERLEAVED-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi float [ [[SUM_NEXT]], %[[LATCH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret float [[SUM_NEXT_LCSSA]]
@@ -2524,7 +1960,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END7:.*]], label %[[SCALAR_PH]]
@@ -2550,7 +1986,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
; CHECK-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ]
; CHECK-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: [[FOR_END7]]:
; CHECK-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[G_1_LCSSA]]
@@ -2590,7 +2026,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
; CHECK-INTERLEAVED-NEXT: [[TMP14]] = add i32 [[VEC_PHI1]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP14]], [[TMP11]]
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]]
@@ -2617,7 +2053,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
; CHECK-INTERLEAVED-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ]
; CHECK-INTERLEAVED-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1
; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK-INTERLEAVED: [[FOR_END7]]:
; CHECK-INTERLEAVED-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[G_1_LCSSA]]
@@ -2680,7 +2116,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
; CHECK-NEXT: [[TMP11]] = add i32 [[TMP8]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END7:.*]], label %[[SCALAR_PH]]
@@ -2707,7 +2143,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
; CHECK-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ]
; CHECK-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: [[FOR_END7]]:
; CHECK-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[G_1_LCSSA]]
@@ -2753,7 +2189,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
; CHECK-INTERLEAVED-NEXT: [[TMP20]] = add i32 [[TMP14]], [[TMP19]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP20]], [[TMP17]]
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]]
@@ -2781,7 +2217,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
; CHECK-INTERLEAVED-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ]
; CHECK-INTERLEAVED-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1
; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK-INTERLEAVED: [[FOR_END7]]:
; CHECK-INTERLEAVED-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
; CHECK-INTERLEAVED-NEXT: ret i32 [[G_1_LCSSA]]
@@ -2890,34 +2326,11 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
; CHECK-NEXT: [[TMP48]] = add i32 [[VEC_PHI]], [[TMP47]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP]]:
-; CHECK-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], %[[FOR_INC:.*]] ], [ [[TMP48]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[A_1_LCSSA]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[G_09:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC3:%.*]], %[[FOR_INC]] ]
-; CHECK-NEXT: [[A_08:%.*]] = phi i32 [ undef, %[[SCALAR_PH]] ], [ [[A_1]], %[[FOR_INC]] ]
-; CHECK-NEXT: [[D:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]], i32 1
-; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[D]], align 4
-; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP45]], 0
-; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[LOR_LHS_FALSE:.*]], label %[[IF_THEN:.*]]
-; CHECK: [[LOR_LHS_FALSE]]:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]]
-; CHECK-NEXT: [[TMP46:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[TOBOOL2_NOT:%.*]] = icmp eq i32 [[TMP46]], 0
-; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], label %[[FOR_INC]], label %[[IF_THEN]]
-; CHECK: [[IF_THEN]]:
-; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[A_08]], 1
-; CHECK-NEXT: br label %[[FOR_INC]]
-; CHECK: [[FOR_INC]]:
-; CHECK-NEXT: [[A_1]] = phi i32 [ [[INC]], %[[IF_THEN]] ], [ [[A_08]], %[[LOR_LHS_FALSE]] ]
-; CHECK-NEXT: [[INC3]] = add nuw nsw i32 [[G_09]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC3]], 1000
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
+; CHECK-NEXT: ret i32 [[TMP48]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @predicated_or_dominates_reduction(
; CHECK-INTERLEAVED-SAME: ptr [[B:%.*]]) {
@@ -3051,35 +2464,12 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP98]] = add i32 [[VEC_PHI1]], [[TMP97]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP99:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP99]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP99]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP98]], [[TMP94]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
; CHECK-INTERLEAVED: [[FOR_COND_CLEANUP]]:
-; CHECK-INTERLEAVED-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], %[[FOR_INC:.*]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
-; CHECK-INTERLEAVED-NEXT: ret i32 [[A_1_LCSSA]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[G_09:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC3:%.*]], %[[FOR_INC]] ]
-; CHECK-INTERLEAVED-NEXT: [[A_08:%.*]] = phi i32 [ undef, %[[SCALAR_PH]] ], [ [[A_1]], %[[FOR_INC]] ]
-; CHECK-INTERLEAVED-NEXT: [[D:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]], i32 1
-; CHECK-INTERLEAVED-NEXT: [[TMP100:%.*]] = load i32, ptr [[D]], align 4
-; CHECK-INTERLEAVED-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP100]], 0
-; CHECK-INTERLEAVED-NEXT: br i1 [[TOBOOL_NOT]], label %[[LOR_LHS_FALSE:.*]], label %[[IF_THEN:.*]]
-; CHECK-INTERLEAVED: [[LOR_LHS_FALSE]]:
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]]
-; CHECK-INTERLEAVED-NEXT: [[TMP101:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[TOBOOL2_NOT:%.*]] = icmp eq i32 [[TMP101]], 0
-; CHECK-INTERLEAVED-NEXT: br i1 [[TOBOOL2_NOT]], label %[[FOR_INC]], label %[[IF_THEN]]
-; CHECK-INTERLEAVED: [[IF_THEN]]:
-; CHECK-INTERLEAVED-NEXT: [[INC:%.*]] = add nsw i32 [[A_08]], 1
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]]
-; CHECK-INTERLEAVED: [[FOR_INC]]:
-; CHECK-INTERLEAVED-NEXT: [[A_1]] = phi i32 [ [[INC]], %[[IF_THEN]] ], [ [[A_08]], %[[LOR_LHS_FALSE]] ]
-; CHECK-INTERLEAVED-NEXT: [[INC3]] = add nuw nsw i32 [[G_09]], 1
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC3]], 1000
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]]
+; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]]
;
entry:
br label %for.body
@@ -3135,27 +2525,11 @@ define i32 @reduction_add_sub(ptr noalias nocapture %A, ptr noalias nocapture %B
; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[X_05]], [[L0]]
-; CHECK-NEXT: [[SUB]] = sub nsw i32 [[ADD]], [[L0_B]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[X_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP6]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_add_sub(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
@@ -3187,28 +2561,12 @@ define i32 @reduction_add_sub(ptr noalias nocapture %A, ptr noalias nocapture %B
; CHECK-INTERLEAVED-NEXT: [[TMP13]] = add i32 [[TMP9]], [[TMP12]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP11]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
-; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[X_05]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[SUB]] = sub nsw i32 [[ADD]], [[L0_B]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
-; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]]
+; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]]
;
entry:
br label %for.body
@@ -3254,27 +2612,11 @@ define i32 @reduction_sub_add(ptr noalias nocapture %A, ptr noalias nocapture %B
; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[FOR_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
-; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X_05]], [[L0]]
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUB]], [[L0_B]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[X_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP6]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sub_add(
; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) {
@@ -3306,28 +2648,12 @@ define i32 @reduction_sub_add(ptr noalias nocapture %A, ptr noalias nocapture %B
; CHECK-INTERLEAVED-NEXT: [[TMP13]] = add i32 [[TMP9]], [[TMP12]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP11]]
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]]
-; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]:
-; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK-INTERLEAVED: [[FOR_BODY]]:
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-INTERLEAVED-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
-; CHECK-INTERLEAVED-NEXT: [[SUB:%.*]] = sub nsw i32 [[X_05]], [[L0]]
-; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[SUB]], [[L0_B]]
-; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
-; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]]
; CHECK-INTERLEAVED: [[FOR_END]]:
-; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
-; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]]
+; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]]
;
entry:
br label %for.body
@@ -3351,6 +2677,129 @@ for.end: ; preds = %for.body, %entry
ret i32 %x.0.lcssa
}
+; Test that bundling recipes that share an operand into an expression works.
+; In this case the two extends are the recipes that share an operand.
+define i64 @reduction_expression_same_operands(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: define i64 @reduction_expression_same_operands(
+; CHECK-SAME: ptr readonly captures(none) [[X:%.*]], ptr readonly captures(none) [[Y:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64>
+; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i64> [[TMP3]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP4]])
+; CHECK-NEXT: [[TMP6]] = add i64 [[VEC_PHI]], [[TMP5]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi i64 [ [[RDX_NEXT:%.*]], %[[LOOP]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[IV]]
+; CHECK-NEXT: [[LOAD0:%.*]] = load i16, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CONV0:%.*]] = sext i16 [[LOAD0]] to i32
+; CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[LOAD0]] to i32
+; CHECK-NEXT: [[MUL1:%.*]] = mul nsw i32 [[CONV0]], [[CONV1]]
+; CHECK-NEXT: [[MUL:%.*]] = sext i32 [[MUL1]] to i64
+; CHECK-NEXT: [[RDX_NEXT]] = add nsw i64 [[RDX]], [[MUL]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
+;
+; CHECK-INTERLEAVED-LABEL: define i64 @reduction_expression_same_operands(
+; CHECK-INTERLEAVED-SAME: ptr readonly captures(none) [[X:%.*]], ptr readonly captures(none) [[Y:%.*]], i32 [[N:%.*]]) {
+; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*]]:
+; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8
+; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-INTERLEAVED: [[VECTOR_PH]]:
+; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 8
+; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
+; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK-INTERLEAVED: [[VECTOR_BODY]]:
+; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[INDEX]]
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 4
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP1]], align 4
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i16>, ptr [[TMP2]], align 4
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nsw <4 x i64> [[TMP4]], [[TMP4]]
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP5]])
+; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = sext <4 x i16> [[WIDE_LOAD2]] to <4 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nsw <4 x i64> [[TMP9]], [[TMP9]]
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP10]])
+; CHECK-INTERLEAVED-NEXT: [[TMP12]] = add i64 [[VEC_PHI1]], [[TMP11]]
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i64 [[TMP12]], [[TMP7]]
+; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-INTERLEAVED: [[SCALAR_PH]]:
+; CHECK-INTERLEAVED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-INTERLEAVED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-INTERLEAVED-NEXT: br label %[[LOOP:.*]]
+; CHECK-INTERLEAVED: [[LOOP]]:
+; CHECK-INTERLEAVED-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-INTERLEAVED-NEXT: [[RDX:%.*]] = phi i64 [ [[RDX_NEXT:%.*]], %[[LOOP]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[IV]]
+; CHECK-INTERLEAVED-NEXT: [[LOAD0:%.*]] = load i16, ptr [[ARRAYIDX]], align 4
+; CHECK-INTERLEAVED-NEXT: [[CONV0:%.*]] = sext i16 [[LOAD0]] to i32
+; CHECK-INTERLEAVED-NEXT: [[CONV1:%.*]] = sext i16 [[LOAD0]] to i32
+; CHECK-INTERLEAVED-NEXT: [[MUL1:%.*]] = mul nsw i32 [[CONV0]], [[CONV1]]
+; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = sext i32 [[MUL1]] to i64
+; CHECK-INTERLEAVED-NEXT: [[RDX_NEXT]] = add nsw i64 [[RDX]], [[MUL]]
+; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK-INTERLEAVED: [[EXIT]]:
+; CHECK-INTERLEAVED-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ]
+; CHECK-INTERLEAVED-NEXT: ret i64 [[R_0_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ]
+ %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv
+ %load0 = load i16, ptr %arrayidx, align 4
+ %conv0 = sext i16 %load0 to i32
+ %conv1 = sext i16 %load0 to i32
+ %mul = mul nsw i32 %conv0, %conv1
+ %conv = sext i32 %mul to i64
+ %rdx.next = add nsw i64 %rdx, %conv
+ %iv.next = add nuw nsw i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ %r.0.lcssa = phi i64 [ %rdx.next, %loop ]
+ ret i64 %r.0.lcssa
+}
+
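[Editor's aside, not part of the diff: a minimal C sketch of the scalar loop in the new test above, assuming n > 0 since the IR loop is bottom-tested; the unused %y parameter is dropped. The two sign-extends of the same load are the operand-sharing recipes the test comment refers to, which the vectorized checks show bundled into a single sext/mul/reduce.add expression over <4 x i64>.

#include <stdint.h>

/* Illustrative equivalent of @reduction_expression_same_operands. */
int64_t reduction_expression_same_operands(const int16_t *x, int32_t n) {
  int64_t rdx = 0;
  for (int32_t iv = 0; iv < n; iv++) {
    int32_t conv0 = (int32_t)x[iv];  /* first sext of the shared load    */
    int32_t conv1 = (int32_t)x[iv];  /* second sext of the same operand  */
    rdx += (int64_t)(conv0 * conv1); /* mul in i32, then widened to i64  */
  }
  return rdx;
}
]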
declare float @llvm.fmuladd.f32(float, float, float)
!6 = distinct !{!6, !7, !8}
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-order.ll b/llvm/test/Transforms/LoopVectorize/reduction-order.ll
index b07c3833..b51db48 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-order.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-order.ll
@@ -1,63 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
; RUN: opt -passes='loop-vectorize' -force-vector-width=4 -force-vector-interleave=1 -S < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
; Make sure the selects generated from reduction are always emitted
; in deterministic order.
-; CHECK-LABEL: @foo(
-; CHECK: vector.body:
-; CHECK: [[VEC_PHI_1:%.+]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[ADD_5:%.+]], %vector.body ]
-; CHECK: [[VEC_PHI_2:%.+]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[ADD_3:%.+]], %vector.body ]
-; CHECK: icmp ule <4 x i64>
-; CHECK-NEXT: [[ADD_3]] = add <4 x i32> splat (i32 3), [[VEC_PHI_2]]
-; CHECK-NEXT: [[ADD_5]] = add <4 x i32> [[VEC_PHI_1]], splat (i32 5)
-; CHECK: select <4 x i1> {{.*}}, <4 x i32> [[ADD_5]], <4 x i32>
-; CHECK-NEXT: select <4 x i1> {{.*}}, <4 x i32> [[ADD_3]], <4 x i32>
-; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
;
-define internal i64 @foo(ptr %t0) !prof !1 {
-t16:
- br label %t20
-
-t17: ; preds = %t20
- %t18 = phi i32 [ %t24, %t20 ]
- %t19 = phi i32 [ %t28, %t20 ]
- br label %t31
+define i32 @foo() !prof !1 {
+; CHECK-LABEL: define i32 @foo() {{.*}}{
+; CHECK-NEXT: [[T16:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI_1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[ADD_5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI_2:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[ADD_3:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <4 x i64> [[VEC_IV]], splat (i64 9)
+; CHECK-NEXT: [[ADD_3]] = add <4 x i32> splat (i32 3), [[VEC_PHI_2]]
+; CHECK-NEXT: [[ADD_5]] = add <4 x i32> [[VEC_PHI_1]], splat (i32 5)
+; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[ADD_5]], <4 x i32> [[VEC_PHI_1]]
+; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[ADD_3]], <4 x i32> [[VEC_PHI_2]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+entry:
+ br label %loop
-t20: ; preds = %t20, %t16
- %t21 = phi i64 [ 0, %t16 ], [ %t29, %t20 ]
- %t22 = phi i32 [ 0, %t16 ], [ %t28, %t20 ]
- %t23 = phi i32 [ 0, %t16 ], [ %t24, %t20 ]
- %t24 = add i32 3, %t23
- %t28 = add i32 %t22, 5
- %t29 = add nuw nsw i64 %t21, 1
- %t30 = icmp eq i64 %t29, 10
- br i1 %t30, label %t17, label %t20, !prof !2
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %red.1 = phi i32 [ 0, %entry ], [ %red.1.next, %loop ]
+ %red.2 = phi i32 [ 0, %entry ], [ %red.2.next, %loop ]
+ %red.2.next = add i32 3, %red.2
+ %red.1.next = add i32 %red.1, 5
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 10
+ br i1 %ec, label %exit, label %loop, !prof !2
-t31:
- ret i64 undef
+exit:
+ %r.2 = phi i32 [ %red.2.next, %loop ]
+ %r.1 = phi i32 [ %red.1.next, %loop ]
+ %add = add i32 %r.2, %r.1
+ ret i32 %add
}
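[Editor's aside, not part of the diff: a C rendering of the rewritten @foo, sketch only. It runs two independent integer reductions in one loop; the concrete result (10*3 + 10*5 = 80) is incidental. What the test pins down is that the two tail-folding selects for these reductions are emitted in a deterministic relative order, as the TMP3/TMP4 checks above require.

#include <stdint.h>

int32_t foo(void) {
  int32_t red1 = 0, red2 = 0;
  for (int64_t iv = 0; iv < 10; iv++) {
    red2 = 3 + red2; /* second reduction; updated first in the IR */
    red1 = red1 + 5; /* first reduction */
  }
  return red2 + red1; /* matches %add = add i32 %r.2, %r.1 */
}
]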
; Make sure we do not fail when checking for ordered reduction. This test just
; exercises the path and bails out without performing vectorization.
-; CHECK-LABEL: quux
-; CHECK-NOT: fadd <4 x
-define void @quux(i1 %arg) {
-bb:
+define double @quux(i1 %arg) {
+; CHECK-LABEL: define double @quux(
+; CHECK-SAME: i1 [[ARG:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[HEADER:.*]]
+; CHECK: [[HEADER]]:
+; CHECK-NEXT: [[TMP5:%.*]] = phi double [ 1.300000e+01, %[[ENTRY]] ], [ [[TMP:%.*]], %[[LATCH:.*]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = fadd double [[TMP5]], 1.000000e+00
+; CHECK-NEXT: br label %[[LATCH]]
+; CHECK: [[LATCH]]:
+; CHECK-NEXT: [[TMP]] = phi double [ [[TMP6]], %[[HEADER]] ]
+; CHECK-NEXT: br i1 [[ARG]], label %[[HEADER]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[R:%.*]] = phi double [ [[TMP]], %[[LATCH]] ]
+; CHECK-NEXT: ret double [[R]]
+;
+entry:
br label %header
-latch: ; preds = %header
- %tmp = phi double [ %tmp6, %header ]
- br i1 %arg, label %header, label %bb2
-
-bb2: ; preds = %latch
- %tmp3 = phi double [ %tmp, %latch ]
- ret void
-
-header: ; preds = %latch, %bb
- %tmp5 = phi double [ 1.300000e+01, %bb ], [ %tmp, %latch ]
+header:
+ %tmp5 = phi double [ 1.300000e+01, %entry ], [ %tmp, %latch ]
%tmp6 = fadd double %tmp5, 1.000000e+00
br label %latch
+
+latch:
+ %tmp = phi double [ %tmp6, %header ]
+ br i1 %arg, label %header, label %exit
+
+exit:
+ %r = phi double [ %tmp, %latch ]
+ ret double %r
}
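[Editor's aside, not part of the diff: a sketch of what @quux computes, assuming plain C floating-point semantics with no fast-math. Each iteration's fadd depends on the previous value, so per the test comment this exercises the ordered-reduction check, and the vectorizer bails out. As in the IR, the loop body runs at least once and exits only when %arg is false.

double quux(int arg) {
  double t = 13.0;
  do {
    t = t + 1.0; /* %tmp6 = fadd double %tmp5, 1.0 */
  } while (arg);
  return t;
}
]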
!1 = !{!"function_entry_count", i64 801}
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
index 7d35ad0..855a0ce 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll
@@ -60,11 +60,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP25]])
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
@@ -162,11 +158,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP43]])
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
@@ -267,11 +259,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
-; CHECK: .lr.ph:
-; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP42]])
; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]]
@@ -371,11 +359,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP42]])
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
@@ -475,11 +459,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP42]])
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
@@ -579,11 +559,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP42]])
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
@@ -683,11 +659,7 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP42]])
; CHECK-NEXT: ret float [[RESULT_0_LCSSA]]
@@ -787,11 +759,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP42]])
; CHECK-NEXT: ret float [[RESULT_0_LCSSA]]
@@ -874,11 +842,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP25]])
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
@@ -959,11 +923,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP25]])
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll
index 916a83a..65d5701 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction.ll
@@ -775,21 +775,7 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: br i1 poison, label [[IF_THEN8:%.*]], label [[IF_ELSE:%.*]]
-; CHECK: if.then8:
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: if.else:
-; CHECK-NEXT: br i1 poison, label [[IF_THEN16:%.*]], label [[FOR_INC]]
-; CHECK: if.then16:
-; CHECK-NEXT: br label [[FOR_INC]]
-; CHECK: for.inc:
-; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]]
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]])
; CHECK-NEXT: ret float [[SUM_1_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
index e6ad593..e621b80 100644
--- a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
@@ -24,20 +24,8 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[BODY:.*]]
-; CHECK: [[BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_TMP:%.*]] = phi i32 [ [[SUM:%.*]], %[[BODY]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[LOAD0:%.*]] = load i32, ptr [[GEP0]], align 4
-; CHECK-NEXT: [[SUM]] = add i32 [[SUM_TMP]], [[LOAD0]]
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 256
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[BODY]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM]], %[[BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP4]]
;
entry:
br label %body
diff --git a/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll b/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll
index 826696f..0896848 100644
--- a/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll
@@ -25,22 +25,8 @@ define i32 @preserve_inbounds(i64 %start, ptr %ptr) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1
-; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[REV_IND_NEXT]]
-; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4
-; CHECK-NEXT: [[REDUX_NEXT]] = add i32 [[LD_PTR]], [[REDUX]]
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]]
; CHECK: [[END]]:
-; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
br label %loop
@@ -85,22 +71,8 @@ define i32 @preserve_nusw(i64 %start, ptr %ptr) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1
-; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr nusw i32, ptr [[PTR]], i64 [[REV_IND_NEXT]]
-; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4
-; CHECK-NEXT: [[REDUX_NEXT]] = add i32 [[LD_PTR]], [[REDUX]]
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]]
; CHECK: [[END]]:
-; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
br label %loop
@@ -145,22 +117,8 @@ define i32 @drop_nuw(i64 %start, ptr %ptr) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1
-; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr nuw i32, ptr [[PTR]], i64 [[REV_IND_NEXT]]
-; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4
-; CHECK-NEXT: [[REDUX_NEXT]] = add i32 [[LD_PTR]], [[REDUX]]
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]]
; CHECK: [[END]]:
-; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP6]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
index 5790921..31129d3 100644
--- a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
@@ -37,22 +37,8 @@ define i32 @reverse_induction_i64(i64 %startval, ptr %ptr) {
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; CHECK-NEXT: br label %[[LOOPEND:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[ADD_I7:%.*]] = phi i64 [ [[STARTVAL]], %[[SCALAR_PH]] ], [ [[ADD_I:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC4:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[REDUX5:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC_REDUX:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ADD_I]] = add i64 [[ADD_I7]], -1
-; CHECK-NEXT: [[KIND__I:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD_I]]
-; CHECK-NEXT: [[TMP_I1:%.*]] = load i32, ptr [[KIND__I]], align 4
-; CHECK-NEXT: [[INC_REDUX]] = add i32 [[TMP_I1]], [[REDUX5]]
-; CHECK-NEXT: [[INC4]] = add i32 [[I_06]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC4]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[LOOPEND]]
; CHECK: [[LOOPEND]]:
-; CHECK-NEXT: [[INC_REDUX_LCSSA:%.*]] = phi i32 [ [[INC_REDUX]], %[[FOR_BODY]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[INC_REDUX_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP13]]
;
entry:
br label %for.body
@@ -105,22 +91,8 @@ define i32 @reverse_induction_i128(i128 %startval, ptr %ptr) {
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; CHECK-NEXT: br label %[[LOOPEND:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[ADD_I7:%.*]] = phi i128 [ [[STARTVAL]], %[[SCALAR_PH]] ], [ [[ADD_I:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC4:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[REDUX5:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC_REDUX:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[ADD_I]] = add i128 [[ADD_I7]], -1
-; CHECK-NEXT: [[KIND__I:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i128 [[ADD_I]]
-; CHECK-NEXT: [[TMP_I1:%.*]] = load i32, ptr [[KIND__I]], align 4
-; CHECK-NEXT: [[INC_REDUX]] = add i32 [[TMP_I1]], [[REDUX5]]
-; CHECK-NEXT: [[INC4]] = add i32 [[I_06]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC4]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[LOOPEND]]
; CHECK: [[LOOPEND]]:
-; CHECK-NEXT: [[INC_REDUX_LCSSA:%.*]] = phi i32 [ [[INC_REDUX]], %[[FOR_BODY]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[INC_REDUX_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP13]]
;
entry:
br label %for.body
@@ -263,19 +235,6 @@ define void @reverse_forward_induction_i64_i8() {
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[WHILE_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
-; CHECK: [[WHILE_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1023, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
-; CHECK-NEXT: [[FORWARD_INDUCTION_05:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[WHILE_BODY]] ]
-; CHECK-NEXT: [[INC]] = add i8 [[FORWARD_INDUCTION_05]], 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[INC]] to i32
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], -1
-; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 0
-; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_BODY]], label %[[WHILE_END]]
; CHECK: [[WHILE_END]]:
; CHECK-NEXT: ret void
;
@@ -329,19 +288,6 @@ define void @reverse_forward_induction_i64_i8_signed() {
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[WHILE_END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
-; CHECK: [[WHILE_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1023, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ]
-; CHECK-NEXT: [[FORWARD_INDUCTION_05:%.*]] = phi i8 [ -127, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[WHILE_BODY]] ]
-; CHECK-NEXT: [[INC]] = add i8 [[FORWARD_INDUCTION_05]], 1
-; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[INC]] to i32
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], -1
-; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[INDVARS_IV]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 0
-; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_BODY]], label %[[WHILE_END]]
; CHECK: [[WHILE_END]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
index 79fdc07..f87be5a 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
@@ -429,13 +429,9 @@ define dso_local void @forced_optsize(ptr noalias nocapture readonly %x_p, ptr n
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
;
; FORCED_OPTSIZE-LABEL: @forced_optsize(
; FORCED_OPTSIZE-NEXT: entry:
diff --git a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll
index a43ea07d..c7b2704 100644
--- a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll
+++ b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll
@@ -19,60 +19,49 @@ define void @test_pr63368(i1 %c, ptr %A) {
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP0]], [[VECTOR_BODY]] ]
; CHECK-NEXT: br label [[EXIT_1:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_1_HEADER:%.*]]
-; CHECK: loop.1.header:
-; CHECK-NEXT: [[IV_1:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_1_NEXT:%.*]], [[LOOP_1_LATCH:%.*]] ]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[A]], align 4
-; CHECK-NEXT: br i1 [[C]], label [[LOOP_1_LATCH]], label [[LOOP_1_LATCH]]
-; CHECK: loop.1.latch:
-; CHECK-NEXT: [[L_LCSSA:%.*]] = phi i32 [ [[L]], [[LOOP_1_HEADER]] ], [ [[L]], [[LOOP_1_HEADER]] ]
-; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i32 [[IV_1]], 1
-; CHECK-NEXT: [[EC_1:%.*]] = icmp eq i32 [[IV_1_NEXT]], 100
-; CHECK-NEXT: br i1 [[EC_1]], label [[EXIT_1]], label [[LOOP_1_HEADER]]
; CHECK: exit.1:
-; CHECK-NEXT: [[L_LCSSA_LCSSA:%.*]] = phi i32 [ [[L_LCSSA]], [[LOOP_1_LATCH]] ], [ [[TMP0]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[L_LCSSA_LCSSA]], i32 -1)
+; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[DOTLCSSA]], i32 -1)
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[SMAX1]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP2]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH2:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK: vector.scevcheck:
-; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[L_LCSSA_LCSSA]], i32 -1)
+; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 poison, i32 -1)
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[SMAX]], 1
; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
; CHECK-NEXT: [[TMP5:%.*]] = add i8 1, [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], 1
; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP3]], 255
; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[SCALAR_PH2]], label [[VECTOR_PH3:%.*]]
-; CHECK: vector.ph3:
+; CHECK-NEXT: br i1 [[TMP8]], label [[SCALAR_PH]], label [[VECTOR_PH2:%.*]]
+; CHECK: vector.ph2:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[N_VEC]] to i8
-; CHECK-NEXT: br label [[VECTOR_BODY4:%.*]]
-; CHECK: vector.body4:
-; CHECK-NEXT: [[INDEX5:%.*]] = phi i32 [ 0, [[VECTOR_PH3]] ], [ [[INDEX_NEXT6:%.*]], [[VECTOR_BODY4]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX5]] to i8
+; CHECK-NEXT: br label [[VECTOR_BODY3:%.*]]
+; CHECK: vector.body3:
+; CHECK-NEXT: [[INDEX4:%.*]] = phi i32 [ 0, [[VECTOR_PH2]] ], [ [[INDEX_NEXT5:%.*]], [[VECTOR_BODY3]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX4]] to i8
; CHECK-NEXT: [[TMP10:%.*]] = add i8 [[OFFSET_IDX]], 1
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[A]], i8 [[TMP10]]
; CHECK-NEXT: store <4 x i8> zeroinitializer, ptr [[TMP11]], align 1
-; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX5]], 4
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK7:%.*]], label [[VECTOR_BODY4]], !llvm.loop [[LOOP3:![0-9]+]]
-; CHECK: middle.block7:
+; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i32 [[INDEX4]], 4
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT5]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK6:%.*]], label [[VECTOR_BODY3]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: middle.block6:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT_2:%.*]], label [[SCALAR_PH2]]
-; CHECK: scalar.ph2:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[TMP9]], [[MIDDLE_BLOCK7]] ], [ 0, [[EXIT_1]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT_2:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[TMP9]], [[MIDDLE_BLOCK6]] ], [ 0, [[EXIT_1]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: br label [[LOOP_2:%.*]]
; CHECK: loop.2:
-; CHECK-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH2]] ], [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ]
+; CHECK-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ]
; CHECK-NEXT: [[IV_2_NEXT]] = add i8 [[IV_2]], 1
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i8 [[IV_2_NEXT]]
; CHECK-NEXT: store i8 0, ptr [[GEP_A]], align 1
; CHECK-NEXT: [[IV_2_SEXT:%.*]] = sext i8 [[IV_2]] to i32
-; CHECK-NEXT: [[EC_2:%.*]] = icmp sge i32 [[L_LCSSA_LCSSA]], [[IV_2_SEXT]]
+; CHECK-NEXT: [[EC_2:%.*]] = icmp sge i32 [[DOTLCSSA]], [[IV_2_SEXT]]
; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP_2]], label [[EXIT_2]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit.2:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll b/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll
index d620b92..92af828 100644
--- a/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll
@@ -20,21 +20,6 @@ define void @neg_cond(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_GEP:%.*]] = getelementptr i32, ptr [[P]], i32 [[IV]]
-; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[P_GEP]], align 4
-; CHECK-NEXT: [[Q_GEP:%.*]] = getelementptr i32, ptr [[Q]], i32 [[IV]]
-; CHECK-NEXT: [[Y:%.*]] = load i32, ptr [[Q_GEP]], align 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 42
-; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[CMP]], true
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[NOT]], i32 42, i32 43
-; CHECK-NEXT: store i32 [[SEL]], ptr [[P_GEP]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll b/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll
index b87cf90..f4d5a84 100644
--- a/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll
+++ b/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll
@@ -25,21 +25,8 @@ define i64 @pr62565_incoming_value_known_undef(i64 %a, ptr %src) {
; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 undef
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ undef, [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1
-; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]]
-; CHECK-NEXT: [[ADD]] = add nuw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[PHI]]
+; CHECK-NEXT: ret i64 [[RDX_SELECT]]
;
entry:
br label %loop
@@ -83,21 +70,8 @@ define i64 @pr62565_incoming_value_known_poison(i64 %a, ptr %src) {
; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 poison
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ poison, [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1
-; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]]
-; CHECK-NEXT: [[ADD]] = add nuw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[PHI]]
+; CHECK-NEXT: ret i64 [[RDX_SELECT]]
;
entry:
br label %loop
@@ -141,21 +115,8 @@ define i64 @pr62565_incoming_value_may_be_poison(i64 %a, ptr %src, i64 %start) {
; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 [[START]]
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[START]], [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1
-; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]]
-; CHECK-NEXT: [[ADD]] = add nuw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[PHI]]
+; CHECK-NEXT: ret i64 [[RDX_SELECT]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction.ll b/llvm/test/Transforms/LoopVectorize/select-reduction.ll
index 0fd780e..1f5646d 100644
--- a/llvm/test/Transforms/LoopVectorize/select-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/select-reduction.ll
@@ -36,22 +36,11 @@ define i32 @test(i64 %N, i32 %x) {
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[EXTRA_ITER]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[SEL_COND:%.*]] = icmp sgt i32 [[NEXT]], 10
-; CHECK-NEXT: [[SEL]] = select i1 [[SEL_COND]], i32 [[NEXT]], i32 10
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 0
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT_LOOPEXIT]], label [[LOOP]]
; CHECK: exit.loopexit:
-; CHECK-NEXT: [[SEL_LCSSA:%.*]] = phi i32 [ [[SEL]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[CHECK]] ], [ [[SEL_LCSSA]], [[EXIT_LOOPEXIT]] ]
+; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[CHECK]] ], [ [[TMP5]], [[LOOP]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
entry:
@@ -90,19 +79,9 @@ define i32 @pr66895_tail_fold_reduction_exit_inst_gets_simplified(i32 %n) {
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VEC_PHI]])
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 12, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], -1
-; CHECK-NEXT: [[RED_NEXT]] = mul i32 [[RED]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 0
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[RED_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP3]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll
index edad0b5..794e274 100644
--- a/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll
+++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll
@@ -40,20 +40,8 @@ define noundef i32 @f(i32 noundef %g) {
; VF4IC2-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
; VF4IC2-NEXT: [[TMP16:%.*]] = add i32 0, [[TMP15]]
; VF4IC2-NEXT: br label %[[RETURN]]
-; VF4IC2: [[SCALAR_PH:.*]]:
-; VF4IC2-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF4IC2: [[LOOP_HEADER]]:
-; VF4IC2-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; VF4IC2-NEXT: [[MUL:%.*]] = shl nuw nsw i32 [[IV]], 3
-; VF4IC2-NEXT: [[SHR:%.*]] = ashr i32 [[G]], [[MUL]]
-; VF4IC2-NEXT: [[EARLY_COND:%.*]] = icmp eq i32 [[SHR]], 0
-; VF4IC2-NEXT: br i1 [[EARLY_COND]], label %[[LOOP_LATCH]], label %[[RETURN]]
-; VF4IC2: [[LOOP_LATCH]]:
-; VF4IC2-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; VF4IC2-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 8
-; VF4IC2-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]]
; VF4IC2: [[RETURN]]:
-; VF4IC2-NEXT: [[RES:%.*]] = phi i32 [ [[SHR]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ [[TMP16]], %[[VECTOR_EARLY_EXIT]] ]
+; VF4IC2-NEXT: [[RES:%.*]] = phi i32 [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ [[TMP16]], %[[VECTOR_EARLY_EXIT]] ]
; VF4IC2-NEXT: ret i32 [[RES]]
;
; VF8IC1-LABEL: define noundef i32 @f(
@@ -80,20 +68,8 @@ define noundef i32 @f(i32 noundef %g) {
; VF8IC1-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
; VF8IC1-NEXT: [[TMP7:%.*]] = add i32 0, [[TMP6]]
; VF8IC1-NEXT: br label %[[RETURN]]
-; VF8IC1: [[SCALAR_PH:.*]]:
-; VF8IC1-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF8IC1: [[LOOP_HEADER]]:
-; VF8IC1-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; VF8IC1-NEXT: [[MUL:%.*]] = shl nuw nsw i32 [[IV]], 3
-; VF8IC1-NEXT: [[SHR:%.*]] = ashr i32 [[G]], [[MUL]]
-; VF8IC1-NEXT: [[EARLY_COND:%.*]] = icmp eq i32 [[SHR]], 0
-; VF8IC1-NEXT: br i1 [[EARLY_COND]], label %[[LOOP_LATCH]], label %[[RETURN]]
-; VF8IC1: [[LOOP_LATCH]]:
-; VF8IC1-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; VF8IC1-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 8
-; VF8IC1-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]]
; VF8IC1: [[RETURN]]:
-; VF8IC1-NEXT: [[RES:%.*]] = phi i32 [ [[SHR]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VECTOR_EARLY_EXIT]] ]
+; VF8IC1-NEXT: [[RES:%.*]] = phi i32 [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VECTOR_EARLY_EXIT]] ]
; VF8IC1-NEXT: ret i32 [[RES]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll
index b1b3a3f..9620697 100644
--- a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll
+++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll
@@ -9,9 +9,9 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ]
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX1]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]]
@@ -22,7 +22,7 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si
; CHECK-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP3]])
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024
; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
-; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_SPLIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_SPLIT]]:
; CHECK-NEXT: br i1 [[TMP5]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -31,22 +31,8 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP4]], i1 true)
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[TMP8]]
; CHECK-NEXT: br label %[[LOOP_END]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP1:.*]]
-; CHECK: [[LOOP1]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[LOOP_INC:.*]] ], [ 0, %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label %[[LOOP_INC]], label %[[LOOP_END]]
-; CHECK: [[LOOP_INC]]:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP1]], label %[[LOOP_END]]
; CHECK: [[LOOP_END]]:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], %[[LOOP1]] ], [ -1, %[[LOOP_INC]] ], [ -1, %[[MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ -1, %[[MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -331,9 +317,9 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 2
; CHECK-NEXT: [[IV_NEXT1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
-; CHECK-NEXT: br label %[[LOOP_HEADER1:.*]]
-; CHECK: [[LOOP_HEADER1]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[LOOP_HEADER1]] ]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[NEXT_GEP]], align 2
@@ -343,10 +329,10 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p
; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_SPLIT:.*]], label %[[LOOP_HEADER1]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_SPLIT]]:
-; CHECK-NEXT: br i1 [[TMP7]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[LOOP_LATCH1:.*]]
-; CHECK: [[LOOP_LATCH1]]:
+; CHECK-NEXT: br i1 [[TMP7]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]]
+; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[VECTOR_EARLY_EXIT]]:
@@ -356,10 +342,10 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP12]]
; CHECK-NEXT: br label %[[EXIT_LOOPEXIT]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi ptr [ [[IV_NEXT1]], %[[LOOP_LATCH1]] ], [ [[A]], %[[LOOP_HEADER_PREHEADER]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IV_NEXT1]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[LOOP_HEADER_PREHEADER]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[IV1]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[IV]], align 2
; CHECK-NEXT: [[C_0:%.*]] = icmp eq i16 [[L]], 0
; CHECK-NEXT: br i1 [[C_0]], label %[[EXIT_LOOPEXIT]], label %[[LOOP_LATCH]]
@@ -368,7 +354,7 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV_NEXT]], [[A_END]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT_LOOPEXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[EXIT_LOOPEXIT]]:
-; CHECK-NEXT: [[P_PH:%.*]] = phi ptr [ [[A_END]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[A_END]], %[[LOOP_LATCH1]] ], [ [[TMP13]], %[[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[P_PH:%.*]] = phi ptr [ [[A_END]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[A_END]], %[[MIDDLE_BLOCK]] ], [ [[TMP13]], %[[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[A]], %[[ENTRY]] ], [ [[P_PH]], %[[EXIT_LOOPEXIT]] ]
@@ -514,3 +500,50 @@ exit:
%first.addr.0.lcssa.i = phi ptr [ %first, %entry ], [ %iv, %loop.header ], [ %iv.next, %loop.latch ]
ret ptr %first.addr.0.lcssa.i
}
+
+define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size_nofree_via_context(ptr noalias %p1, ptr noalias %p2) nosync {
+; CHECK-LABEL: define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size_nofree_via_context(
+; CHECK-SAME: ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P1]], i64 4), "dereferenceable"(ptr [[P1]], i64 1024) ]
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[LOOP_INC:.*]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX1]]
+; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]]
+; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
+; CHECK-NEXT: br i1 [[CMP3]], label %[[LOOP_INC]], label %[[LOOP_END:.*]]
+; CHECK: [[LOOP_INC]]:
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX1]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[LOOP_END]]
+; CHECK: [[LOOP_END]]:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX1]], %[[LOOP]] ], [ -1, %[[LOOP_INC]] ]
+; CHECK-NEXT: ret i64 [[RETVAL]]
+;
+entry:
+ call void @llvm.assume(i1 true) [ "align"(ptr %p1, i64 4), "dereferenceable"(ptr %p1, i64 1024) ]
+ call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 4), "dereferenceable"(ptr %p2, i64 1024) ]
+ br label %loop
+
+loop:
+ %index = phi i64 [ %index.next, %loop.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
+ %ld1 = load i8, ptr %arrayidx, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
+ %ld2 = load i8, ptr %arrayidx1, align 1
+ %cmp3 = icmp eq i8 %ld1, %ld2
+ br i1 %cmp3, label %loop.inc, label %loop.end
+
+loop.inc:
+ %index.next = add i64 %index, 1
+ %exitcond = icmp ne i64 %index.next, 1024
+ br i1 %exitcond, label %loop, label %loop.end
+
+loop.end:
+ %retval = phi i64 [ %index, %loop ], [ -1, %loop.inc ]
+ ret i64 %retval
+}
diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll
index b630557..d8e62c7 100644
--- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll
+++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll
@@ -43,24 +43,10 @@ define i64 @multi_exiting_to_different_exits_live_in_exit_values() {
; VF4IC4-NEXT: br label %[[E2:.*]]
; VF4IC4: [[VECTOR_EARLY_EXIT]]:
; VF4IC4-NEXT: br label %[[E1:.*]]
-; VF4IC4: [[SCALAR_PH:.*]]:
-; VF4IC4-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF4IC4: [[LOOP_HEADER]]:
-; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[INC:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[SCALAR_PH]] ]
-; VF4IC4-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
-; VF4IC4-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4
-; VF4IC4-NEXT: [[C_1:%.*]] = icmp eq i32 [[L]], 10
-; VF4IC4-NEXT: br i1 [[C_1]], label %[[E1]], label %[[LOOP_LATCH]]
-; VF4IC4: [[LOOP_LATCH]]:
-; VF4IC4-NEXT: [[INC]] = add nuw i64 [[IV]], 1
-; VF4IC4-NEXT: [[C_2:%.*]] = icmp eq i64 [[INC]], 128
-; VF4IC4-NEXT: br i1 [[C_2]], label %[[E2]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
; VF4IC4: [[E1]]:
-; VF4IC4-NEXT: [[P1:%.*]] = phi i64 [ 0, %[[LOOP_HEADER]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ]
-; VF4IC4-NEXT: ret i64 [[P1]]
+; VF4IC4-NEXT: ret i64 0
; VF4IC4: [[E2]]:
-; VF4IC4-NEXT: [[P2:%.*]] = phi i64 [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ]
-; VF4IC4-NEXT: ret i64 [[P2]]
+; VF4IC4-NEXT: ret i64 1
;
entry:
%src = alloca [128 x i32]
@@ -94,6 +80,4 @@ e2:
; VF4IC4: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; VF4IC4: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; VF4IC4: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; VF4IC4: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
-; VF4IC4: [[META4]] = !{!"llvm.loop.interleave.count", i32 4}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll
index 6836f7b..a50ce96 100644
--- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll
@@ -46,21 +46,9 @@ define i8 @iv_used_in_exit_with_math(i8 noundef %g) {
; CHECK-NEXT: [[TMP20:%.*]] = trunc i32 [[TMP19]] to i8
; CHECK-NEXT: [[TMP23:%.*]] = trunc i32 [[TMP19]] to i8
; CHECK-NEXT: br label %[[RETURN]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[S:%.*]] = shl nuw i8 1, [[IV]]
-; CHECK-NEXT: [[A:%.*]] = and i8 [[S]], [[G]]
-; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[RETURN]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i8 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV_NEXT]], 4
-; CHECK-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]]
; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[RES_IV1:%.*]] = phi i8 [ 32, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP20]], %[[VECTOR_EARLY_EXIT]] ]
-; CHECK-NEXT: [[RES_IV2:%.*]] = phi i8 [ 0, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP23]], %[[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RES_IV1:%.*]] = phi i8 [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP20]], %[[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RES_IV2:%.*]] = phi i8 [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP23]], %[[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: [[RES:%.*]] = add i8 [[RES_IV1]], [[RES_IV2]]
; CHECK-NEXT: ret i8 [[RES]]
;
@@ -125,21 +113,9 @@ define i32 @iv_used_in_exit_with_loads(ptr align 4 dereferenceable(128) %src) {
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
; CHECK-NEXT: [[TMP29:%.*]] = add i32 [[INDEX]], [[TMP28]]
; CHECK-NEXT: br label %[[RETURN]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[RETURN]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 32
-; CHECK-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]]
; CHECK: [[RETURN]]:
-; CHECK-NEXT: [[RES_IV1:%.*]] = phi i32 [ 32, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ]
-; CHECK-NEXT: [[RES_IV2:%.*]] = phi i32 [ 0, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RES_IV1:%.*]] = phi i32 [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RES_IV2:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_IV1]], [[RES_IV2]]
; CHECK-NEXT: ret i32 [[RES]]
;
diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll
index a4ce68f..ed5dcc7 100644
--- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll
@@ -42,25 +42,11 @@ define i64 @multi_exiting_to_different_exits_live_in_exit_values() {
; VF4IC4: middle.block:
; VF4IC4-NEXT: br label [[E2:%.*]]
; VF4IC4: vector.early.exit:
-; VF4IC4-NEXT: br label [[E1:%.*]]
-; VF4IC4: scalar.ph:
; VF4IC4-NEXT: br label [[LOOP_HEADER:%.*]]
-; VF4IC4: loop.header:
-; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[INC:%.*]], [[LOOP_LATCH:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; VF4IC4-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]]
-; VF4IC4-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4
-; VF4IC4-NEXT: [[C_1:%.*]] = icmp eq i32 [[L]], 10
-; VF4IC4-NEXT: br i1 [[C_1]], label [[E1]], label [[LOOP_LATCH]]
-; VF4IC4: loop.latch:
-; VF4IC4-NEXT: [[INC]] = add nuw i64 [[IV]], 1
-; VF4IC4-NEXT: [[C_2:%.*]] = icmp eq i64 [[INC]], 128
-; VF4IC4-NEXT: br i1 [[C_2]], label [[E2]], label [[LOOP_HEADER]]
; VF4IC4: e1:
-; VF4IC4-NEXT: [[P1:%.*]] = phi i64 [ 0, [[LOOP_HEADER]] ], [ 0, [[VECTOR_EARLY_EXIT]] ]
-; VF4IC4-NEXT: ret i64 [[P1]]
+; VF4IC4-NEXT: ret i64 0
; VF4IC4: e2:
-; VF4IC4-NEXT: [[P2:%.*]] = phi i64 [ 1, [[LOOP_LATCH]] ], [ 1, [[MIDDLE_BLOCK]] ]
-; VF4IC4-NEXT: ret i64 [[P2]]
+; VF4IC4-NEXT: ret i64 1
;
entry:
%src = alloca [128 x i32]
@@ -155,22 +141,8 @@ define i64 @same_exit_block_pre_inc_use1() {
; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]]
; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]]
; VF4IC4-NEXT: br label [[LOOP_END]]
-; VF4IC4: scalar.ph:
-; VF4IC4-NEXT: br label [[LOOP:%.*]]
-; VF4IC4: loop:
-; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1
-; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1
-; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; VF4IC4: loop.inc:
-; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67
-; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; VF4IC4: loop.end:
-; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ]
+; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ]
; VF4IC4-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -256,19 +228,8 @@ define ptr @same_exit_block_pre_inc_use1_ivptr() {
; VF4IC4-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], [[TMP6]]
; VF4IC4-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP7]]
; VF4IC4-NEXT: br label [[LOOP_END]]
-; VF4IC4: scalar.ph:
-; VF4IC4-NEXT: br label [[LOOP:%.*]]
-; VF4IC4: loop:
-; VF4IC4-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ]
-; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1
-; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 72
-; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; VF4IC4: loop.inc:
-; VF4IC4-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
-; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]]
-; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; VF4IC4: loop.end:
-; VF4IC4-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTR]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VECTOR_EARLY_EXIT]] ]
+; VF4IC4-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VECTOR_EARLY_EXIT]] ]
; VF4IC4-NEXT: ret ptr [[RETVAL]]
;
entry:
@@ -360,22 +321,8 @@ define i64 @same_exit_block_post_inc_use() {
; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]]
; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]]
; VF4IC4-NEXT: br label [[LOOP_END]]
-; VF4IC4: scalar.ph:
-; VF4IC4-NEXT: br label [[LOOP:%.*]]
-; VF4IC4: loop:
-; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1
-; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1
-; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; VF4IC4: loop.inc:
-; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67
-; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; VF4IC4: loop.end:
-; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[IV_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ]
+; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ]
; VF4IC4-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -470,27 +417,11 @@ define i64 @diff_exit_block_pre_inc_use1() {
; VF4IC4-NEXT: [[TMP8:%.*]] = select i1 [[TMP32]], i64 [[TMP31]], i64 [[TMP29]]
; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]]
; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]]
-; VF4IC4-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; VF4IC4: scalar.ph:
; VF4IC4-NEXT: br label [[LOOP:%.*]]
-; VF4IC4: loop:
-; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1
-; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1
-; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; VF4IC4: loop.inc:
-; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67
-; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; VF4IC4: loop.early.exit:
-; VF4IC4-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ]
-; VF4IC4-NEXT: ret i64 [[RETVAL1]]
+; VF4IC4-NEXT: ret i64 [[TMP10]]
; VF4IC4: loop.end:
-; VF4IC4-NEXT: [[RETVAL2:%.*]] = phi i64 [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ]
-; VF4IC4-NEXT: ret i64 [[RETVAL2]]
+; VF4IC4-NEXT: ret i64 67
;
entry:
%p1 = alloca [1024 x i8]
@@ -588,27 +519,11 @@ define i64 @diff_exit_block_post_inc_use1() {
; VF4IC4-NEXT: [[TMP8:%.*]] = select i1 [[TMP32]], i64 [[TMP31]], i64 [[TMP29]]
; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]]
; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]]
-; VF4IC4-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; VF4IC4: scalar.ph:
; VF4IC4-NEXT: br label [[LOOP:%.*]]
-; VF4IC4: loop:
-; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1
-; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1
-; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; VF4IC4: loop.inc:
-; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67
-; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; VF4IC4: loop.early.exit:
-; VF4IC4-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ]
-; VF4IC4-NEXT: ret i64 [[RETVAL1]]
+; VF4IC4-NEXT: ret i64 [[TMP10]]
; VF4IC4: loop.end:
-; VF4IC4-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[IV_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ]
-; VF4IC4-NEXT: ret i64 [[RETVAL2]]
+; VF4IC4-NEXT: ret i64 67
;
entry:
%p1 = alloca [1024 x i8]
@@ -847,22 +762,8 @@ define i8 @same_exit_block_use_loaded_value() {
; VF4IC4-NEXT: [[TMP41:%.*]] = icmp uge i64 [[TMP8]], 12
; VF4IC4-NEXT: [[TMP42:%.*]] = select i1 [[TMP41]], i8 [[TMP40]], i8 [[TMP38]]
; VF4IC4-NEXT: br label [[LOOP_END]]
-; VF4IC4: scalar.ph:
-; VF4IC4-NEXT: br label [[LOOP:%.*]]
-; VF4IC4: loop:
-; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ]
-; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1
-; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]]
-; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1
-; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; VF4IC4: loop.inc:
-; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP_END]], label [[LOOP]]
; VF4IC4: loop.end:
-; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i8 [ [[LD1]], [[LOOP]] ], [ -1, [[LOOP_INC]] ], [ -1, [[MIDDLE_BLOCK]] ], [ [[TMP42]], [[VECTOR_EARLY_EXIT]] ]
+; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i8 [ -1, [[MIDDLE_BLOCK]] ], [ [[TMP42]], [[VECTOR_EARLY_EXIT]] ]
; VF4IC4-NEXT: ret i8 [[RETVAL]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll b/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll
index 219c66f..3bb39b9 100644
--- a/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll
+++ b/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll
@@ -29,28 +29,7 @@ define void @single_incoming_phi_no_blend_mask(i64 %a, i64 %b) {
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16
-; CHECK-NEXT: br label [[LOOP_COND:%.*]]
-; CHECK: loop.cond:
-; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ]
-; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]]
-; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1
-; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]]
-; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]]
-; CHECK: loop.next:
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[RES:%.*]] = phi i16 [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ]
-; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]]
-; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31
-; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -112,29 +91,7 @@ define void @single_incoming_phi_with_blend_mask(i64 %a, i64 %b) {
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16
-; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]]
-; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_COND:%.*]], label [[LOOP_LATCH]]
-; CHECK: loop.cond:
-; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ]
-; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]]
-; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1
-; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]]
-; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]]
-; CHECK: loop.next:
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[RES:%.*]] = phi i16 [ 0, [[LOOP_HEADER]] ], [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ]
-; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]]
-; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31
-; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -201,26 +158,7 @@ define void @multiple_incoming_phi_with_blend_mask(i64 %a, ptr noalias %dst) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16
-; CHECK-NEXT: [[IV_TRUNC_2:%.*]] = trunc i64 [[IV]] to i16
-; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]]
-; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]]
-; CHECK: loop.next:
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ], [ [[IV_TRUNC_2]], [[LOOP_NEXT]] ]
-; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]]
-; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1
-; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i16 [[LV]], ptr [[DST_PTR]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31
-; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -297,29 +235,7 @@ define void @single_incoming_needs_predication(i64 %a, i64 %b) {
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16
-; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]]
-; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_COND:%.*]], label [[LOOP_LATCH]]
-; CHECK: loop.cond:
-; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ]
-; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]]
-; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1
-; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]]
-; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]]
-; CHECK: loop.next:
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[RES:%.*]] = phi i16 [ 0, [[LOOP_HEADER]] ], [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ]
-; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]]
-; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 63
-; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -371,20 +287,7 @@ define void @duplicated_incoming_blocks_blend(i32 %x, ptr %ptr) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[ADD_I:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[C_0:%.*]] = icmp ugt i32 [[IV]], [[X:%.*]]
-; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[IV]], [[LOOP_HEADER]] ], [ [[IV]], [[LOOP_HEADER]] ]
-; CHECK-NEXT: [[GEP_PTR:%.*]] = getelementptr i32, ptr [[PTR]], i32 [[P]]
-; CHECK-NEXT: store i32 [[P]], ptr [[GEP_PTR]], align 4
-; CHECK-NEXT: [[ADD_I]] = add nsw i32 [[P]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD_I]], 1000
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_HEADER]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
index 04f04a8..3500c5c 100644
--- a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
@@ -34,22 +34,8 @@ define i64 @same_exit_block_phi_of_consts() {
; CHECK-NEXT: br label [[LOOP_END:%.*]]
; CHECK: vector.early.exit:
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 0, [[LOOP]] ], [ 1, [[LOOP_INC]] ], [ 1, [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 1, [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -108,21 +94,7 @@ define i64 @diff_exit_block_phi_of_consts() {
; CHECK: middle.block:
; CHECK-NEXT: br label [[LOOP_END:%.*]]
; CHECK: vector.early.exit:
-; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.early.exit:
; CHECK-NEXT: ret i64 0
; CHECK: loop.end:
@@ -292,16 +264,7 @@ define i32 @diff_blocks_invariant_early_exit_cond(ptr %s) {
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: vector.early.exit:
-; CHECK-NEXT: br label [[EARLY_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IND:%.*]] = phi i32 [ -10, [[SCALAR_PH:%.*]] ], [ [[IND_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: br i1 [[COND]], label [[FOR_INC]], label [[EARLY_EXIT]]
-; CHECK: for.inc:
-; CHECK-NEXT: [[IND_NEXT]] = add nsw i32 [[IND]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IND_NEXT]], 266
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]]
; CHECK: early.exit:
; CHECK-NEXT: tail call void @abort()
; CHECK-NEXT: unreachable
diff --git a/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll b/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll
index 54408b2..79821b8 100644
--- a/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll
+++ b/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll
@@ -36,22 +36,8 @@ define i64 @same_exit_block_pre_inc_use1() {
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -116,24 +102,8 @@ define i32 @same_exit_block_pre_inc_use1_iv64_endi32_step2() {
; CHECK-NEXT: [[TMP11:%.*]] = mul i32 [[DOTCAST]], 2
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i32 9, [[TMP11]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[INDEX2:%.*]] = phi i32 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[INDEX2_NEXT]] = add i32 [[INDEX2]], 2
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ [[INDEX2]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i32 [[RETVAL]]
;
entry:
@@ -197,23 +167,8 @@ define i32 @same_exit_block_pre_inc_use1_iv128_endi32_step2() {
; CHECK-NEXT: [[TMP10:%.*]] = mul i32 [[DOTCAST]], 2
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i32 9, [[TMP10]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i128 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[INDEX2:%.*]] = phi i32 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC]] ], [ [[P1]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 3
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i128 [[INDEX]], 1
-; CHECK-NEXT: [[INDEX2_NEXT]] = add i32 [[INDEX2]], 2
-; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i128 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ [[INDEX2]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i32 [[RETVAL]]
;
entry:
@@ -277,24 +232,8 @@ define float @same_exit_block_pre_inc_use1_iv64_endf32() {
; CHECK-NEXT: [[TMP11:%.*]] = fmul fast float 1.000000e+00, [[DOTCAST]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = fadd fast float 9.000000e+00, [[TMP11]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[INDEX2:%.*]] = phi float [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9.000000e+00, [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[INDEX2_NEXT]] = fadd fast float [[INDEX2]], 1.000000e+00
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi float [ [[INDEX2]], [[LOOP]] ], [ 1.230000e+02, [[LOOP_INC]] ], [ 1.230000e+02, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi float [ 1.230000e+02, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret float [[RETVAL]]
;
entry:
@@ -360,24 +299,8 @@ define ptr @same_exit_block_pre_inc_use1_iv64_endptr() {
; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 5
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP20]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[INDEX2:%.*]] = phi ptr [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ [[P2]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[INDEX2_NEXT]] = getelementptr i8, ptr [[INDEX2]], i64 5
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[INDEX2]], [[LOOP]] ], [ [[P1]], [[LOOP_INC]] ], [ [[P1]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[P1]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret ptr [[RETVAL]]
;
entry:
@@ -438,19 +361,8 @@ define ptr @same_exit_block_pre_inc_use1_ivptr() {
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP8]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 72
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTR]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret ptr [[RETVAL]]
;
entry:
@@ -512,23 +424,8 @@ define i64 @same_exit_block_pre_inc1_use_inv_cond(i1 %cond) {
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: [[CMP4:%.*]] = select i1 [[COND]], i1 [[CMP3]], i1 false
-; CHECK-NEXT: br i1 [[CMP4]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -592,22 +489,8 @@ define i64 @same_exit_block_pre_inc_use1_gep_two_indices() {
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P1]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P2]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -670,22 +553,8 @@ define i64 @same_exit_block_pre_inc_use1_alloca_diff_type() {
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -745,22 +614,8 @@ define i64 @same_exit_block_pre_inc_use2() {
; CHECK-NEXT: br label [[LOOP_END:%.*]]
; CHECK: vector.early.exit:
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[LOOP]] ], [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ], [ 67, [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ 67, [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -823,22 +678,8 @@ define i64 @same_exit_block_pre_inc_use3() {
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ [[INDEX]], [[LOOP]] ], [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[INDEX_LCSSA]]
;
entry:
@@ -902,20 +743,8 @@ define i64 @same_exit_block_pre_inc_use4() {
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP8]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[INDEX]], [[LD1]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -976,22 +805,8 @@ define i64 @same_exit_block_post_inc_use() {
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -1051,19 +866,8 @@ define ptr @same_exit_block_post_inc_use1_ivptr() {
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 1
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP9]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1
-; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 72
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTR_NEXT]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret ptr [[RETVAL]]
;
entry:
@@ -1123,22 +927,8 @@ define i64 @same_exit_block_post_inc_use2() {
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 1
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP]] ], [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -1200,27 +990,11 @@ define i64 @diff_exit_block_pre_inc_use1() {
; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
-; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.early.exit:
-; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
-; CHECK-NEXT: ret i64 [[RETVAL1]]
+; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RETVAL2]]
+; CHECK-NEXT: ret i64 67
;
entry:
%p1 = alloca [1024 x i8]
@@ -1282,27 +1056,11 @@ define i64 @diff_exit_block_pre_inc_use2() {
; CHECK: middle.block:
; CHECK-NEXT: br label [[LOOP_END:%.*]]
; CHECK: vector.early.exit:
-; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.early.exit:
-; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ 67, [[LOOP]] ], [ 67, [[VECTOR_EARLY_EXIT]] ]
-; CHECK-NEXT: ret i64 [[RETVAL1]]
+; CHECK-NEXT: ret i64 67
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RETVAL2]]
+; CHECK-NEXT: ret i64 66
;
entry:
%p1 = alloca [1024 x i8]
@@ -1367,27 +1125,11 @@ define i64 @diff_exit_block_pre_inc_use3() {
; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX2]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
-; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.early.exit:
-; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
-; CHECK-NEXT: ret i64 [[INDEX_LCSSA]]
+; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]]
; CHECK: loop.end:
-; CHECK-NEXT: [[INDEX_LCSSA1:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[INDEX_LCSSA1]]
+; CHECK-NEXT: ret i64 66
;
entry:
%p1 = alloca [1024 x i8]
@@ -1450,27 +1192,11 @@ define i64 @diff_exit_block_post_inc_use1() {
; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP13]], i1 true)
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
-; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.early.exit:
-; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
-; CHECK-NEXT: ret i64 [[RETVAL1]]
+; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RETVAL2]]
+; CHECK-NEXT: ret i64 67
;
entry:
%p1 = alloca [1024 x i8]
@@ -1536,27 +1262,11 @@ define i64 @diff_exit_block_post_inc_use2() {
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 1
; CHECK-NEXT: [[TMP21:%.*]] = add i64 3, [[TMP11]]
-; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.early.exit:
-; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP]] ], [ [[TMP21]], [[VECTOR_EARLY_EXIT]] ]
-; CHECK-NEXT: ret i64 [[RETVAL1]]
+; CHECK-NEXT: ret i64 [[TMP21]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RETVAL2]]
+; CHECK-NEXT: ret i64 66
;
entry:
%p1 = alloca [1024 x i8]
@@ -1624,29 +1334,11 @@ define i64 @diff_exit_block_post_inc_use3(i64 %start) {
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 1
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 [[START]], [[TMP12]]
-; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ [[START]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[INDEX2_NEXT]] = add i64 [[INDEX2]], 1
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.early.exit:
-; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX2_NEXT]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
-; CHECK-NEXT: ret i64 [[RETVAL1]]
+; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX2]], [[LOOP_INC]] ], [ [[IND_ESCAPE]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RETVAL2]]
+; CHECK-NEXT: ret i64 [[IND_ESCAPE]]
;
entry:
%p1 = alloca [1024 x i8]
@@ -1713,21 +1405,8 @@ define i64 @loop_contains_safe_call() {
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[SQRT:%.*]] = tail call fast float @llvm.sqrt.f32(float [[LD1]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp fast ult float [[SQRT]], 3.000000e+00
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -1788,21 +1467,8 @@ define i64 @loop_contains_safe_div() {
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[LD1]], 20000
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[DIV]], 1
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -1864,22 +1530,8 @@ define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align(
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LD1]], 1
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[LD2]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
@@ -2071,22 +1723,8 @@ define i64 @same_exit_block_pre_inc_use1_deref_ptrs(ptr dereferenceable(1024) %p
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]]
; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]]
; CHECK-NEXT: br label [[LOOP_END]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
-; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
-; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
-; CHECK: loop.inc:
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
-; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
index 66300ed..19ab96d 100644
--- a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
@@ -41,18 +41,7 @@ define void @pr75298_store_reduction_value_in_folded_loop(i64 %iv.start) optsize
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: store i32 [[TMP6]], ptr @a, align 4
-; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_START]], [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr @c, align 4
-; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], [[L]]
-; CHECK-NEXT: store i32 [[RED_NEXT]], ptr @a, align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 7
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT_LOOPEXIT]], label [[LOOP]]
; CHECK: exit.loopexit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
diff --git a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
index 7027d85..ca32808 100644
--- a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
@@ -23,19 +23,9 @@ define float @pr70988() {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT3]], 1022
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021
-; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[DOTLCSSA]]
+; CHECK-NEXT: ret float [[TMP5]]
;
; CHECK-ALM-LABEL: define float @pr70988() {
; CHECK-ALM-NEXT: entry:
@@ -56,19 +46,9 @@ define float @pr70988() {
; CHECK-ALM-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT3]], 1022
; CHECK-ALM-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-ALM: middle.block:
-; CHECK-ALM-NEXT: br label [[EXIT:%.*]]
-; CHECK-ALM: scalar.ph:
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
-; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00
-; CHECK-ALM-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1
-; CHECK-ALM-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021
-; CHECK-ALM-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT]]
; CHECK-ALM: exit:
-; CHECK-ALM-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-ALM-NEXT: ret float [[DOTLCSSA]]
+; CHECK-ALM-NEXT: ret float [[TMP5]]
;
entry:
br label %loop
@@ -123,21 +103,9 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) {
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[NARROW:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4
-; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], [[L]]
-; CHECK-NEXT: [[EC:%.*]] = icmp ult i32 [[NARROW]], 15
-; CHECK-NEXT: br i1 [[EC]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[DOTLCSSA]]
+; CHECK-NEXT: ret float [[TMP13]]
;
; CHECK-ALM-LABEL: define float @pr72720reduction_using_active_lane_mask(
; CHECK-ALM-SAME: ptr [[SRC:%.*]]) {
@@ -173,21 +141,9 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) {
; CHECK-ALM-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
; CHECK-ALM-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-ALM: middle.block:
-; CHECK-ALM-NEXT: br label [[EXIT:%.*]]
-; CHECK-ALM: scalar.ph:
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
-; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[NARROW:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1
-; CHECK-ALM-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]]
-; CHECK-ALM-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4
-; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], [[L]]
-; CHECK-ALM-NEXT: [[EC:%.*]] = icmp ult i32 [[NARROW]], 15
-; CHECK-ALM-NEXT: br i1 [[EC]], label [[LOOP]], label [[EXIT]]
; CHECK-ALM: exit:
-; CHECK-ALM-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
-; CHECK-ALM-NEXT: ret float [[DOTLCSSA]]
+; CHECK-ALM-NEXT: ret float [[TMP11]]
;
entry:
br label %loop
@@ -229,19 +185,9 @@ define float @fadd_reduction_with_live_in(float %inc) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1002
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]]
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[LCSSA:%.*]] = phi float [ [[SUM_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret float [[LCSSA]]
+; CHECK-NEXT: ret float [[TMP5]]
;
; CHECK-ALM-LABEL: define float @fadd_reduction_with_live_in(
; CHECK-ALM-SAME: float [[INC:%.*]]) {
@@ -263,19 +209,9 @@ define float @fadd_reduction_with_live_in(float %inc) {
; CHECK-ALM-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1002
; CHECK-ALM-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-ALM: middle.block:
-; CHECK-ALM-NEXT: br label [[EXIT:%.*]]
-; CHECK-ALM: scalar.ph:
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
-; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]]
-; CHECK-ALM-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-ALM-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000
-; CHECK-ALM-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK-ALM: exit:
-; CHECK-ALM-NEXT: [[LCSSA:%.*]] = phi float [ [[SUM_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-ALM-NEXT: ret float [[LCSSA]]
+; CHECK-ALM-NEXT: ret float [[TMP5]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll
index 97f686c..dcab18f 100644
--- a/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll
@@ -22,16 +22,6 @@ define void @test_variable_stride(ptr %dst, i32 %scale) {
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IDX:%.*]] = mul i32 [[IV]], [[SCALE]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i32 [[IDX]]
-; CHECK-NEXT: store i32 [[IV]], ptr [[GEP]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
index 87eebb7b..a852b73 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
@@ -54,16 +54,6 @@ define i32 @test(ptr %vf1, i64 %n) {
; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP18:%.*]] = alloca i8, i64 [[N]], align 16
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[VF1]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store ptr [[TMP18]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV]], 200
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
index 4bc4e54..00e04c7 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
@@ -34,15 +34,6 @@ define void @canonical_small_tc_i8(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 15
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
@@ -94,15 +85,6 @@ define void @canonical_upper_limit_i8(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 255
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
@@ -154,15 +136,6 @@ define void @canonical_lower_limit_i16(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 257
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
@@ -214,15 +187,6 @@ define void @canonical_upper_limit_i16(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 65535
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
@@ -274,15 +238,6 @@ define void @canonical_lower_limit_i32(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 65537
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
@@ -334,15 +289,6 @@ define void @canonical_upper_limit_i32(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 4294967295
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
@@ -394,15 +340,6 @@ define void @canonical_lower_limit_i64(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 4294967297
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
@@ -454,15 +391,6 @@ define void @canonical_upper_limit_i64(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], -1
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
@@ -514,15 +442,6 @@ define void @canonical_lower_limit_i128(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[END:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i256 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i256 [[IV]]
-; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i256 [[IV]], 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i256 [[IV_NEXT]], 18446744073709551617
-; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]]
; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
index 6fd7c70..b6f43aa 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
@@ -55,22 +55,6 @@ define void @tail_fold_switch(ptr %dst, i32 %0) {
; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: switch i32 [[TMP0]], label %[[LOOP_LATCH]] [
-; CHECK-NEXT: i32 0, label %[[LOOP_LATCH]]
-; CHECK-NEXT: i32 1, label %[[IF_THEN:.*]]
-; CHECK-NEXT: ]
-; CHECK: [[IF_THEN]]:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 4
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
index 45c56a0..3bc5da1 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
@@ -53,18 +53,9 @@ define void @VF1-VPlanExe(ptr %dst) {
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 0, ptr [[DST_PTR]], align 4
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
;
entry:
br label %for.body
@@ -132,17 +123,9 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
-; CHECK: for.body:
-; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[PTR1]], [[SCALAR_PH:%.*]] ]
-; CHECK-NEXT: store double 0.000000e+00, ptr [[ADDR]], align 8
-; CHECK-NEXT: [[PTR]] = getelementptr inbounds double, ptr [[ADDR]], i64 1
-; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[PTR]], [[PTR2]]
-; CHECK-NEXT: br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
;
entry:
%ptr2 = getelementptr inbounds double, ptr %ptr1, i64 15
diff --git a/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll b/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll
index 387a02e..8a16293 100644
--- a/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll
+++ b/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll
@@ -133,26 +133,7 @@ define void @ext_cmp(ptr %src.1, ptr %src.2, ptr noalias %dst) {
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i16, ptr [[SRC_1]], i64 [[IV]]
-; CHECK-NEXT: [[I2:%.*]] = load i16, ptr [[GEP_SRC_1]], align 2
-; CHECK-NEXT: [[I3:%.*]] = sext i16 [[I2]] to i32
-; CHECK-NEXT: [[C_1:%.*]] = icmp sgt i32 0, [[I3]]
-; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds i8, ptr [[SRC_2]], i64 [[IV]]
-; CHECK-NEXT: [[I4:%.*]] = load i8, ptr [[GEP_SRC_2]], align 2
-; CHECK-NEXT: [[I5:%.*]] = zext i8 [[I4]] to i32
-; CHECK-NEXT: [[I6:%.*]] = select i1 [[C_1]], i32 0, i32 [[I5]]
-; CHECK-NEXT: [[I7:%.*]] = and i32 [[I6]], 0
-; CHECK-NEXT: [[I8:%.*]] = trunc nuw nsw i32 [[I7]] to i16
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store i16 [[I8]], ptr [[GEP_DST]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll b/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll
index 83ecf1a..6e7cdba 100644
--- a/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll
+++ b/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll
@@ -26,21 +26,7 @@ define void @pr77468(ptr noalias %src, ptr noalias %dst, i1 %x) {
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i16 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1
-; CHECK-NEXT: [[X_EXT:%.*]] = zext i1 [[X]] to i32
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[X_EXT]], [[L]]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i16 [[IV]]
-; CHECK-NEXT: [[T:%.*]] = trunc i32 [[AND]] to i16
-; CHECK-NEXT: store i16 [[T]], ptr [[GEP_DST]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i16 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll b/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll
index 2f5f157..2aebb73 100644
--- a/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll
@@ -18,11 +18,7 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %ptr) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[AND_LCSSA_OFF0:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> [[TMP2]])
; CHECK-NEXT: ret i8 [[AND_LCSSA_OFF0]]
@@ -64,11 +60,7 @@ define i16 @reduction_or_trunc(ptr noalias nocapture %ptr) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[XOR_LCSSA_OFF0:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP2]])
; CHECK-NEXT: ret i16 [[XOR_LCSSA_OFF0]]
@@ -110,11 +102,7 @@ define i16 @reduction_xor_trunc(ptr noalias nocapture %ptr) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[XOR_LCSSA_OFF0:%.*]] = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> [[TMP2]])
; CHECK-NEXT: ret i16 [[XOR_LCSSA_OFF0]]
diff --git a/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll b/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll
index 4a372b5..498c58d 100644
--- a/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll
+++ b/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll
@@ -24,20 +24,7 @@ define void @test_pr47927_lshr_const_shift_ops(ptr %dst, i32 %f) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[L:%.*]] = lshr i32 [[F]], 18
-; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8
-; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]]
-; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100
-; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -81,20 +68,7 @@ define void @test_shl_const_shift_ops(ptr %dst, i32 %f) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[L:%.*]] = shl i32 [[F]], 18
-; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8
-; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]]
-; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100
-; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -138,20 +112,7 @@ define void @test_ashr_const_shift_ops(ptr %dst, i32 %f) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[L:%.*]] = ashr i32 [[F]], 18
-; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8
-; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]]
-; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100
-; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -195,22 +156,7 @@ define void @test_shl_const_shifted_op(ptr %dst, i32 %f) {
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]]
-; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[LV]] to i32
-; CHECK-NEXT: [[L:%.*]] = shl i32 19, [[ZEXT]]
-; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8
-; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100
-; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -257,22 +203,7 @@ define void @test_lshr_by_18(ptr %A) {
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV_EXT]]
-; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1
-; CHECK-NEXT: [[LV_EXT:%.*]] = zext i8 [[LV]] to i32
-; CHECK-NEXT: [[L:%.*]] = lshr i32 [[LV_EXT]], 18
-; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8
-; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100
-; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -318,22 +249,7 @@ define void @test_lshr_by_4(ptr %A) {
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV_EXT]]
-; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1
-; CHECK-NEXT: [[LV_EXT:%.*]] = zext i8 [[LV]] to i32
-; CHECK-NEXT: [[L:%.*]] = lshr i32 [[LV_EXT]], 4
-; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8
-; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100
-; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll b/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll
index d6273e0..b85f274 100644
--- a/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll
+++ b/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll
@@ -22,19 +22,7 @@ define void @uitofp_preserve_nneg(ptr %result, i32 %size, float %y) {
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[FOR_EXIT:%.*]]
-; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER4:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[CONV:%.*]] = uitofp nneg i32 [[TMP4]] to float
-; CHECK-NEXT: [[TMP5:%.*]] = fmul float [[CONV]], [[Y]]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = zext nneg i32 [[TMP4]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[RESULT]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store float [[TMP5]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[TMP4]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 256
-; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_EXIT]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
index ccb301f..985a9a2 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
@@ -21,21 +21,6 @@ define void @blend_uniform_iv_trunc(i1 %c) {
; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[IV_TRUNC_2:%.*]] = trunc i64 [[IV]] to i16
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_NEXT]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ poison, %[[LOOP_HEADER]] ], [ [[IV_TRUNC_2]], %[[LOOP_NEXT]] ]
-; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i16 [[BLEND]]
-; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31
-; CHECK-NEXT: br i1 [[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -79,20 +64,6 @@ define void @blend_uniform_iv(i1 %c) {
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_NEXT]]:
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[BLEND:%.*]] = phi i64 [ poison, %[[LOOP_HEADER]] ], [ [[IV]], %[[LOOP_NEXT]] ]
-; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[BLEND]]
-; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31
-; CHECK-NEXT: br i1 [[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -150,25 +121,6 @@ define void @blend_chain_iv(i1 %c) {
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_NEXT]]:
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT_2:.*]], label %[[LOOP_NEXT_3:.*]]
-; CHECK: [[LOOP_NEXT_2]]:
-; CHECK-NEXT: br label %[[LOOP_NEXT_3]]
-; CHECK: [[LOOP_NEXT_3]]:
-; CHECK-NEXT: [[BLEND_1:%.*]] = phi i64 [ undef, %[[LOOP_NEXT]] ], [ [[IV]], %[[LOOP_NEXT_2]] ]
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[BLEND:%.*]] = phi i64 [ undef, %[[LOOP_HEADER]] ], [ [[BLEND_1]], %[[LOOP_NEXT_3]] ]
-; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[BLEND]]
-; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31
-; CHECK-NEXT: br i1 [[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -275,22 +227,6 @@ define void @redundant_branch_and_blends_without_mask(ptr %A) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[GEP_IV:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_IV]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L]], 10
-; CHECK-NEXT: br label %[[LOOP_LATCH]]
-; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: [[P_1:%.*]] = phi i32 [ [[L]], %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[P_2:%.*]] = phi i32 [ [[ADD]], %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[RES:%.*]] = add i32 [[P_1]], [[P_2]]
-; CHECK-NEXT: store i32 [[RES]], ptr [[GEP_IV]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll
index 2c49fda..571c55c 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll
@@ -24,7 +24,8 @@ define void @ld_div1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -64,10 +65,11 @@ define void @ld_div2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -112,10 +114,11 @@ define void @ld_div3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -167,10 +170,11 @@ define void @ld_div1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -214,10 +218,11 @@ define void @ld_div2_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -269,10 +274,11 @@ define void @ld_div3_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -324,7 +330,7 @@ define void @ld_div1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -379,7 +385,7 @@ define void @ld_div2_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -426,7 +432,7 @@ define void @ld_div3_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -467,7 +473,7 @@ define void @ld_div1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
-; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -516,7 +522,7 @@ define void @ld_div2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -565,7 +571,7 @@ define void @ld_div3_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -621,7 +627,7 @@ define void @ld_div1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -669,7 +675,7 @@ define void @ld_div2_step2_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store i64 [[TMP9]], ptr [[TMP7]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -725,7 +731,7 @@ define void @ld_div3_step2_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -781,7 +787,7 @@ define void @ld_div1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -837,7 +843,7 @@ define void @ld_div2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -885,7 +891,7 @@ define void @ld_div3_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store i64 [[TMP9]], ptr [[TMP7]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -933,10 +939,11 @@ define void @test_step_is_not_invariant(ptr %A) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 56
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
index c7525fb..6cf82fc 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
@@ -24,7 +24,8 @@ define void @ld_and_neg1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -64,10 +65,11 @@ define void @ld_and_neg2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -112,10 +114,11 @@ define void @ld_and_neg3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -167,10 +170,11 @@ define void @ld_and_neg1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -212,10 +216,11 @@ define void @ld_and_neg2_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store i64 [[TMP5]], ptr [[TMP7]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -267,7 +272,7 @@ define void @ld_and_neg1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -322,7 +327,7 @@ define void @ld_and_neg2_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -371,7 +376,7 @@ define void @ld_and_neg2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -427,7 +432,7 @@ define void @ld_and_neg2_step2_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -483,7 +488,7 @@ define void @ld_and_neg2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -539,7 +544,7 @@ define void @ld_and_neg3_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll
index 27cefa2..9ed2240 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll
@@ -58,7 +58,8 @@ define void @ld_div2_urem3_1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -130,10 +131,11 @@ define void @ld_div2_urem3_2(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8)
; CHECK-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -203,10 +205,11 @@ define void @ld_div4(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8)
; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -247,10 +250,11 @@ define void @ld_div8_urem3(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: store <8 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
+; CHECK: exit:
+; CHECK-NEXT: ret void
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll
index cee53b5..2b5d0f3 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll
@@ -25,7 +25,8 @@ define void @ld_lshr0_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_lshr0_step1_start0_ind1
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -46,7 +47,8 @@ define void @ld_lshr0_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -86,10 +88,11 @@ define void @ld_lshr1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_lshr1_step1_start0_ind1
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -123,10 +126,11 @@ define void @ld_lshr1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; VF4-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -166,10 +170,11 @@ define void @ld_lshr2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_lshr2_step1_start0_ind1
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -189,10 +194,11 @@ define void @ld_lshr2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; VF4-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -244,10 +250,11 @@ define void @ld_lshr0_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_lshr0_step2_start0_ind1
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -296,10 +303,11 @@ define void @ld_lshr0_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -343,10 +351,11 @@ define void @ld_lshr1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_lshr1_step2_start0_ind1
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -379,10 +388,11 @@ define void @ld_lshr1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: store i64 [[TMP14]], ptr [[TMP10]], align 8
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF4-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -434,7 +444,7 @@ define void @ld_lshr0_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -486,7 +496,7 @@ define void @ld_lshr0_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -541,7 +551,7 @@ define void @ld_lshr1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -593,7 +603,7 @@ define void @ld_lshr1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -643,7 +653,7 @@ define void @ld_lshr1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; VF2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
-; VF2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -681,7 +691,7 @@ define void @ld_lshr1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; VF4-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996
-; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -729,7 +739,7 @@ define void @ld_lshr1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: store i64 [[TMP9]], ptr [[TMP7]], align 8
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498
-; VF2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -766,7 +776,7 @@ define void @ld_lshr1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: store i64 [[TMP15]], ptr [[TMP11]], align 8
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496
-; VF4-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -822,7 +832,7 @@ define void @ld_lshr1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -875,7 +885,7 @@ define void @ld_lshr1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -931,7 +941,7 @@ define void @ld_lshr2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -984,7 +994,7 @@ define void @ld_lshr2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll
index 0f8289d..12851d7 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll
@@ -35,7 +35,8 @@ define void @ld_div1_step1_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_div1_step1_start0_ind2
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -76,7 +77,8 @@ define void @ld_div1_step1_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -121,10 +123,11 @@ define void @ld_div2_step1_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; VF2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_div2_step1_start0_ind2
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -162,10 +165,11 @@ define void @ld_div2_step1_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -218,10 +222,11 @@ define void @ld_div3_step1_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_div3_step1_start0_ind2
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -259,10 +264,11 @@ define void @ld_div3_step1_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -322,10 +328,11 @@ define void @ld_div1_step2_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_div1_step2_start0_ind2
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -378,10 +385,11 @@ define void @ld_div1_step2_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -441,10 +449,11 @@ define void @ld_div2_step2_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_div2_step2_start0_ind2
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -497,10 +506,11 @@ define void @ld_div2_step2_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -560,10 +570,11 @@ define void @ld_div3_step2_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[EXIT:%.*]]
-; VF2: scalar.ph:
+; VF2: exit:
+; VF2-NEXT: ret void
;
; VF4-LABEL: define void @ld_div3_step2_start0_ind2
; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
@@ -616,10 +627,11 @@ define void @ld_div3_step2_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500
-; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[EXIT:%.*]]
-; VF4: scalar.ph:
+; VF4: exit:
+; VF4-NEXT: ret void
;
entry:
br label %loop
@@ -679,7 +691,7 @@ define void @ld_div1_step3_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -735,7 +747,7 @@ define void @ld_div1_step3_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -798,7 +810,7 @@ define void @ld_div2_step3_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -854,7 +866,7 @@ define void @ld_div2_step3_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -917,7 +929,7 @@ define void @ld_div3_step3_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -973,7 +985,7 @@ define void @ld_div3_step3_start0_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1030,7 +1042,7 @@ define void @ld_div1_step1_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
-; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1072,7 +1084,7 @@ define void @ld_div1_step1_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996
-; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1129,7 +1141,7 @@ define void @ld_div2_step1_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
-; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1171,7 +1183,7 @@ define void @ld_div2_step1_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996
-; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1228,7 +1240,7 @@ define void @ld_div3_step1_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
-; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1270,7 +1282,7 @@ define void @ld_div3_step1_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996
-; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1334,7 +1346,7 @@ define void @ld_div1_step2_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498
-; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1391,7 +1403,7 @@ define void @ld_div1_step2_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496
-; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1455,7 +1467,7 @@ define void @ld_div2_step2_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498
-; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1512,7 +1524,7 @@ define void @ld_div2_step2_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496
-; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1576,7 +1588,7 @@ define void @ld_div3_step2_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498
-; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1633,7 +1645,7 @@ define void @ld_div3_step2_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496
-; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1697,7 +1709,7 @@ define void @ld_div1_step3_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1754,7 +1766,7 @@ define void @ld_div1_step3_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1818,7 +1830,7 @@ define void @ld_div2_step3_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1875,7 +1887,7 @@ define void @ld_div2_step3_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
@@ -1939,7 +1951,7 @@ define void @ld_div3_step3_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6)
; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2)
; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; VF2: middle.block:
; VF2-NEXT: br label [[SCALAR_PH:%.*]]
; VF2: scalar.ph:
@@ -1996,7 +2008,7 @@ define void @ld_div3_step3_start1_ind2(ptr noalias %A, ptr noalias %B) {
; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12)
; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4)
; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332
-; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; VF4: middle.block:
; VF4-NEXT: br label [[SCALAR_PH:%.*]]
; VF4: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll b/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll
index 5f83e392..5d07341 100644
--- a/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll
+++ b/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll
@@ -23,26 +23,7 @@ define void @test_not_first_lane_only_constant(ptr %A, ptr noalias %B) {
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]]
-; CHECK-NEXT: br i1 false, label [[LOOP_LATCH]], label [[ELSE_1:%.*]]
-; CHECK: else.1:
-; CHECK-NEXT: br i1 false, label [[THEN_2:%.*]], label [[ELSE_2:%.*]]
-; CHECK: then.2:
-; CHECK-NEXT: br label [[ELSE_2]]
-; CHECK: else.2:
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ [[B]], [[ELSE_2]] ], [ poison, [[LOOP_HEADER]] ]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2
-; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -98,29 +79,7 @@ define void @test_not_first_lane_only_wide_compare(ptr %A, ptr noalias %B, i16 %
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]]
-; CHECK-NEXT: [[L_0:%.*]] = load i16, ptr [[GEP_A]], align 2
-; CHECK-NEXT: [[C_0:%.*]] = icmp ult i16 [[L_0]], [[X]]
-; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[ELSE_1:%.*]]
-; CHECK: else.1:
-; CHECK-NEXT: [[C_1:%.*]] = icmp ult i16 [[L_0]], [[Y]]
-; CHECK-NEXT: br i1 [[C_1]], label [[THEN_2:%.*]], label [[ELSE_2:%.*]]
-; CHECK: then.2:
-; CHECK-NEXT: br label [[ELSE_2]]
-; CHECK: else.2:
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ [[B]], [[ELSE_2]] ], [ poison, [[LOOP_HEADER]] ]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2
-; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -179,29 +138,7 @@ define void @test_not_first_lane_only_wide_compare_incoming_order_swapped(ptr %A
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]]
-; CHECK-NEXT: [[L_0:%.*]] = load i16, ptr [[GEP_A]], align 2
-; CHECK-NEXT: [[C_0:%.*]] = icmp ult i16 [[L_0]], [[X]]
-; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[ELSE_1:%.*]]
-; CHECK: else.1:
-; CHECK-NEXT: [[C_1:%.*]] = icmp ult i16 [[L_0]], [[Y]]
-; CHECK-NEXT: br i1 [[C_1]], label [[THEN_2:%.*]], label [[ELSE_2:%.*]]
-; CHECK: then.2:
-; CHECK-NEXT: br label [[ELSE_2]]
-; CHECK: else.2:
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ poison, [[LOOP_HEADER]] ], [ [[B]], [[ELSE_2]] ]
-; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2
-; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2
-; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: br label [[LOOP_LATCH:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll
index 462865d..8da1dca 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll
@@ -31,20 +31,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn
; VF8UF1-NEXT: br label %[[EXIT:.*]]
; VF8UF1: [[VECTOR_EARLY_EXIT]]:
; VF8UF1-NEXT: br label %[[EXIT]]
-; VF8UF1: [[SCALAR_PH:.*]]:
-; VF8UF1-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF8UF1: [[LOOP_HEADER]]:
-; VF8UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; VF8UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]]
-; VF8UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1
-; VF8UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0
-; VF8UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]]
-; VF8UF1: [[LOOP_LATCH]]:
-; VF8UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1
-; VF8UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16
-; VF8UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]]
; VF8UF1: [[EXIT]]:
-; VF8UF1-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ]
+; VF8UF1-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ]
; VF8UF1-NEXT: ret i8 [[RES]]
;
; VF8UF2-LABEL: define i8 @test_early_exit_max_tc_less_than_16(
@@ -70,20 +58,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn
; VF8UF2-NEXT: br label %[[EXIT:.*]]
; VF8UF2: [[VECTOR_EARLY_EXIT]]:
; VF8UF2-NEXT: br label %[[EXIT]]
-; VF8UF2: [[SCALAR_PH:.*]]:
-; VF8UF2-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF8UF2: [[LOOP_HEADER]]:
-; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; VF8UF2-NEXT: [[P_SRC:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]]
-; VF8UF2-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1
-; VF8UF2-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0
-; VF8UF2-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]]
-; VF8UF2: [[LOOP_LATCH]]:
-; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
-; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16
-; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]]
; VF8UF2: [[EXIT]]:
-; VF8UF2-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ]
+; VF8UF2-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ]
; VF8UF2-NEXT: ret i8 [[RES]]
;
; VF16UF1-LABEL: define i8 @test_early_exit_max_tc_less_than_16(
@@ -104,20 +80,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn
; VF16UF1-NEXT: br label %[[EXIT:.*]]
; VF16UF1: [[VECTOR_EARLY_EXIT]]:
; VF16UF1-NEXT: br label %[[EXIT]]
-; VF16UF1: [[SCALAR_PH:.*]]:
-; VF16UF1-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF16UF1: [[LOOP_HEADER]]:
-; VF16UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; VF16UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]]
-; VF16UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1
-; VF16UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0
-; VF16UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]]
-; VF16UF1: [[LOOP_LATCH]]:
-; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1
-; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16
-; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]]
; VF16UF1: [[EXIT]]:
-; VF16UF1-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ]
+; VF16UF1-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ]
; VF16UF1-NEXT: ret i8 [[RES]]
;
entry:
@@ -166,20 +130,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer
; VF8UF1-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> [[TMP3]], i1 true)
; VF8UF1-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], [[FIRST_ACTIVE_LANE]]
; VF8UF1-NEXT: br label %[[EXIT]]
-; VF8UF1: [[SCALAR_PH:.*]]:
-; VF8UF1-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF8UF1: [[LOOP_HEADER]]:
-; VF8UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; VF8UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]]
-; VF8UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1
-; VF8UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0
-; VF8UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]]
-; VF8UF1: [[LOOP_LATCH]]:
-; VF8UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1
-; VF8UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16
-; VF8UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]]
; VF8UF1: [[EXIT]]:
-; VF8UF1-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP8]], %[[VECTOR_EARLY_EXIT]] ]
+; VF8UF1-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP8]], %[[VECTOR_EARLY_EXIT]] ]
; VF8UF1-NEXT: ret i64 [[RES]]
;
; VF8UF2-LABEL: define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(
@@ -212,20 +164,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer
; VF8UF2-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 [[TMP7]]
; VF8UF2-NEXT: [[TMP12:%.*]] = add i64 0, [[TMP11]]
; VF8UF2-NEXT: br label %[[EXIT]]
-; VF8UF2: [[SCALAR_PH:.*]]:
-; VF8UF2-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF8UF2: [[LOOP_HEADER]]:
-; VF8UF2-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; VF8UF2-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]]
-; VF8UF2-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1
-; VF8UF2-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0
-; VF8UF2-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]]
-; VF8UF2: [[LOOP_LATCH]]:
-; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1
-; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16
-; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]]
; VF8UF2: [[EXIT]]:
-; VF8UF2-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP12]], %[[VECTOR_EARLY_EXIT]] ]
+; VF8UF2-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP12]], %[[VECTOR_EARLY_EXIT]] ]
; VF8UF2-NEXT: ret i64 [[RES]]
;
; VF16UF1-LABEL: define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(
@@ -248,20 +188,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer
; VF16UF1-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v16i1(<16 x i1> [[TMP3]], i1 true)
; VF16UF1-NEXT: [[TMP5:%.*]] = add i64 0, [[FIRST_ACTIVE_LANE]]
; VF16UF1-NEXT: br label %[[EXIT]]
-; VF16UF1: [[SCALAR_PH:.*]]:
-; VF16UF1-NEXT: br label %[[LOOP_HEADER:.*]]
-; VF16UF1: [[LOOP_HEADER]]:
-; VF16UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; VF16UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]]
-; VF16UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1
-; VF16UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0
-; VF16UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]]
-; VF16UF1: [[LOOP_LATCH]]:
-; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1
-; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16
-; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]]
; VF16UF1: [[EXIT]]:
-; VF16UF1-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP5]], %[[VECTOR_EARLY_EXIT]] ]
+; VF16UF1-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP5]], %[[VECTOR_EARLY_EXIT]] ]
; VF16UF1-NEXT: ret i64 [[RES]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll
index d013584..2317af5 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll
@@ -17,18 +17,8 @@ define i64 @remove_loop_region_int_iv_used_outside(ptr %dst) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store ptr null, ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 16
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 15, %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RES]]
+; CHECK-NEXT: ret i64 15
;
entry:
br label %loop
@@ -60,18 +50,8 @@ define i64 @remove_loop_region_int_iv_inc_used_outside(ptr %dst) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store ptr null, ptr [[GEP]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 16
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[IV_NEXT]], %[[LOOP]] ], [ 16, %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i64 [[RES]]
+; CHECK-NEXT: ret i64 16
;
entry:
br label %loop
@@ -105,19 +85,8 @@ define ptr @remove_loop_region_ptr_iv_used_outside(ptr %dst) {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -8
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[INT_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INT_IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8
-; CHECK-NEXT: [[INT_IV_NEXT]] = add i64 [[INT_IV]], 1
-; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 8
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[INT_IV_NEXT]], 16
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi ptr [ [[PTR_IV]], %[[LOOP]] ], [ [[IND_ESCAPE]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret ptr [[RES]]
+; CHECK-NEXT: ret ptr [[IND_ESCAPE]]
;
entry:
br label %loop
@@ -151,19 +120,8 @@ define ptr @remove_loop_region_ptr_iv_inc_used_outside(ptr %dst) {
; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[INT_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INT_IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8
-; CHECK-NEXT: [[INT_IV_NEXT]] = add i64 [[INT_IV]], 1
-; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 8
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[INT_IV_NEXT]], 16
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi ptr [ [[PTR_IV_NEXT]], %[[LOOP]] ], [ [[TMP0]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret ptr [[RES]]
+; CHECK-NEXT: ret ptr [[TMP0]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
index 5f86469..e160a15 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
@@ -176,15 +176,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF8UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF8UF1: [[MIDDLE_BLOCK]]:
; VF8UF1-NEXT: br label %[[EXIT:.*]]
-; VF8UF1: [[SCALAR_PH:.*]]:
-; VF8UF1-NEXT: br label %[[LOOP:.*]]
-; VF8UF1: [[LOOP]]:
-; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
-; VF8UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2
-; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; VF8UF1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; VF8UF1-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; VF8UF1: [[EXIT]]:
; VF8UF1-NEXT: ret void
;
@@ -316,15 +307,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF8UF2-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF8UF2: [[MIDDLE_BLOCK]]:
; VF8UF2-NEXT: br label %[[EXIT:.*]]
-; VF8UF2: [[SCALAR_PH:.*]]:
-; VF8UF2-NEXT: br label %[[LOOP:.*]]
-; VF8UF2: [[LOOP]]:
-; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
-; VF8UF2-NEXT: store i16 0, ptr [[GEP_DST]], align 2
-; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; VF8UF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; VF8UF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; VF8UF2: [[EXIT]]:
; VF8UF2-NEXT: ret void
;
@@ -455,15 +437,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF16UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF16UF1: [[MIDDLE_BLOCK]]:
; VF16UF1-NEXT: br label %[[EXIT:.*]]
-; VF16UF1: [[SCALAR_PH:.*]]:
-; VF16UF1-NEXT: br label %[[LOOP:.*]]
-; VF16UF1: [[LOOP]]:
-; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
-; VF16UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2
-; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; VF16UF1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; VF16UF1-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]]
; VF16UF1: [[EXIT]]:
; VF16UF1-NEXT: ret void
;
@@ -728,23 +701,14 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF8UF1: [[PRED_STORE_IF13]]:
; VF8UF1-NEXT: [[TMP40:%.*]] = mul i64 7, [[STEP]]
; VF8UF1-NEXT: [[TMP41:%.*]] = add i64 0, [[TMP40]]
-; VF8UF1-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], [[STEP]]
-; VF8UF1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP42]]
-; VF8UF1-NEXT: store i8 0, ptr [[TMP43]], align 1
+; VF8UF1-NEXT: [[IV_NEXT:%.*]] = add i64 [[TMP41]], [[STEP]]
+; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
+; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE14]]
; VF8UF1: [[PRED_STORE_CONTINUE14]]:
; VF8UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF8UF1: [[MIDDLE_BLOCK]]:
; VF8UF1-NEXT: br label %[[EXIT:.*]]
-; VF8UF1: [[SCALAR_PH:.*]]:
-; VF8UF1-NEXT: br label %[[LOOP:.*]]
-; VF8UF1: [[LOOP]]:
-; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
-; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
-; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
-; VF8UF1-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16
-; VF8UF1-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]]
; VF8UF1: [[EXIT]]:
; VF8UF1-NEXT: ret void
;
@@ -922,22 +886,13 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF8UF2-NEXT: [[TMP81:%.*]] = mul i64 15, [[STEP]]
; VF8UF2-NEXT: [[TMP82:%.*]] = add i64 0, [[TMP81]]
; VF8UF2-NEXT: [[TMP83:%.*]] = add i64 [[TMP82]], [[STEP]]
-; VF8UF2-NEXT: [[TMP84:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP83]]
-; VF8UF2-NEXT: store i8 0, ptr [[TMP84]], align 1
+; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP83]]
+; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1
; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE30]]
; VF8UF2: [[PRED_STORE_CONTINUE30]]:
; VF8UF2-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF8UF2: [[MIDDLE_BLOCK]]:
; VF8UF2-NEXT: br label %[[EXIT:.*]]
-; VF8UF2: [[SCALAR_PH:.*]]:
-; VF8UF2-NEXT: br label %[[LOOP:.*]]
-; VF8UF2: [[LOOP]]:
-; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
-; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
-; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1
-; VF8UF2-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16
-; VF8UF2-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]]
; VF8UF2: [[EXIT]]:
; VF8UF2-NEXT: ret void
;
@@ -1114,22 +1069,13 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF16UF1-NEXT: [[TMP80:%.*]] = mul i64 15, [[STEP]]
; VF16UF1-NEXT: [[TMP81:%.*]] = add i64 0, [[TMP80]]
; VF16UF1-NEXT: [[TMP82:%.*]] = add i64 [[TMP81]], [[STEP]]
-; VF16UF1-NEXT: [[TMP83:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP82]]
-; VF16UF1-NEXT: store i8 0, ptr [[TMP83]], align 1
+; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP82]]
+; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE30]]
; VF16UF1: [[PRED_STORE_CONTINUE30]]:
; VF16UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF16UF1: [[MIDDLE_BLOCK]]:
; VF16UF1-NEXT: br label %[[EXIT:.*]]
-; VF16UF1: [[SCALAR_PH:.*]]:
-; VF16UF1-NEXT: br label %[[LOOP:.*]]
-; VF16UF1: [[LOOP]]:
-; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
-; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
-; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
-; VF16UF1-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16
-; VF16UF1-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]]
; VF16UF1: [[EXIT]]:
; VF16UF1-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
index 5a0c69b..06b0448 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
@@ -753,3 +753,50 @@ exit:
%r.0.lcssa = phi i64 [ %rdx.next, %loop ]
ret i64 %r.0.lcssa
}
+
+define i64 @print_mulacc_duplicate_extends(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: 'print_mulacc_duplicate_extends'
+; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VF:%.+]]> = VF
+; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count
+; CHECK-NEXT: Live-in ir<%n> = original trip-count
+; CHECK-EMPTY:
+; CHECK: vector.ph:
+; CHECK-NEXT: EMIT vp<[[RDX_START:%.+]]> = reduction-start-vector ir<0>, ir<0>, ir<1>
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[IV_NEXT:%.+]]>
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX:%.+]]> = phi vp<[[RDX_START]]>, vp<[[RDX_NEXT:%.+]]>
+; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[IV]]>, ir<1>
+; CHECK-NEXT: CLONE ir<[[ARRAYIDX0:%.+]]> = getelementptr inbounds ir<%x>, vp<[[STEPS]]>
+; CHECK-NEXT: vp<[[ADDR0:%.+]]> = vector-pointer ir<[[ARRAYIDX0]]>
+; CHECK-NEXT: WIDEN ir<[[LOAD0:%.+]]> = load vp<[[ADDR0]]>
+; CHECK-NEXT: EXPRESSION vp<[[RDX_NEXT:%.+]]> = ir<[[RDX]]> + reduce.sub (mul nsw (ir<[[LOAD0]]> sext to i64), (ir<[[LOAD0]]> sext to i64))
+; CHECK-NEXT: EMIT vp<[[IV_NEXT]]> = add nuw vp<[[IV]]>, vp<[[VFxUF]]>
+; CHECK-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ]
+ %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv
+ %load0 = load i16, ptr %arrayidx, align 4
+ %conv0 = sext i16 %load0 to i32
+ %mul = mul nsw i32 %conv0, %conv0
+ %conv = sext i32 %mul to i64
+ %rdx.next = sub nsw i64 %rdx, %conv
+ %iv.next = add nuw nsw i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ %r.0.lcssa = phi i64 [ %rdx.next, %loop ]
+ ret i64 %r.0.lcssa
+}
diff --git a/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll b/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll
index 06b7bd8..d08ca8c 100644
--- a/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll
+++ b/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll
@@ -21,19 +21,6 @@ define void @pr63340(ptr %A, ptr %B) {
; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
-; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
-; CHECK-NEXT: br label [[LOOP_LATCH]]
-; CHECK: loop.latch:
-; CHECK-NEXT: [[F_0_I:%.*]] = phi ptr [ [[A]], [[LOOP_HEADER]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[F_0_I]], i64 1
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds ptr, ptr [[B]], i8 [[IV]]
-; CHECK-NEXT: store ptr [[GEP]], ptr [[GEP_B]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV_NEXT]], -128
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -78,17 +65,6 @@ define void @wide_gep_index_invariant(ptr noalias %dst, ptr noalias %src, i64 %n
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[L:%.*]] = load ptr, ptr [[SRC]], align 8
-; CHECK-NEXT: [[GEP_L:%.*]] = getelementptr float, ptr [[L]], i64 [[N]]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store ptr [[GEP_L]], ptr [[GEP_DST]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -131,17 +107,6 @@ define void @wide_gep_multiple_indices_some_invariant(ptr noalias %dst, ptr noal
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[L:%.*]] = load ptr, ptr [[SRC]], align 8
-; CHECK-NEXT: [[GEP_L:%.*]] = getelementptr [10 x float], ptr [[L]], i32 [[X]], i64 [[IV]]
-; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]]
-; CHECK-NEXT: store ptr [[GEP_L]], ptr [[GEP_DST]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll b/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll
index 055f2fd..922ebe7 100644
--- a/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll
+++ b/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll
@@ -20,17 +20,6 @@ define void @powi_only_first_lane_used_of_second_arg(ptr %p, i32 %pow) {
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[SCALAR_PH:.*]]:
-; CHECK-NEXT: br label %[[LOOP:.*]]
-; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[P_GEP:%.*]] = getelementptr float, ptr [[P]], i32 [[IV]]
-; CHECK-NEXT: [[X:%.*]] = load float, ptr [[P_GEP]], align 4
-; CHECK-NEXT: [[Y:%.*]] = call float @llvm.powi.f32.i32(float [[X]], i32 [[POW]])
-; CHECK-NEXT: store float [[Y]], ptr [[P_GEP]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
-; CHECK-NEXT: [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
index 435e6fc..5e9fe8c 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
@@ -34,8 +34,8 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP2]], [[WHILE_END_LOOPEXIT]] ]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[BLOCKSIZE]], 15
; CHECK-NEXT: [[CMP2_NOT15:%.*]] = icmp eq i32 [[AND]], 0
-; CHECK-NEXT: br i1 [[CMP2_NOT15]], label [[WHILE_END5:%.*]], label [[MIDDLE_BLOCK:%.*]]
-; CHECK: middle.block:
+; CHECK-NEXT: br i1 [[CMP2_NOT15]], label [[WHILE_END5:%.*]], label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = tail call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 0, i32 [[AND]])
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[PSRC_ADDR_0_LCSSA]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
@@ -44,7 +44,7 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef
; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[SUM_0_LCSSA]], [[TMP6]]
; CHECK-NEXT: br label [[WHILE_END5]]
; CHECK: while.end5:
-; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUM_1_LCSSA]], [[BLOCKSIZE]]
; CHECK-NEXT: [[CONV6:%.*]] = trunc i32 [[DIV]] to i8
; CHECK-NEXT: store i8 [[CONV6]], ptr [[PRESULT:%.*]], align 1
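
Context for the arm_mean_q7 checks above: the remainder iterations are tail-folded into a single masked vector.body via llvm.get.active.lane.mask, whose semantics are elementwise — lane i is active iff Base + i < TripCount. A small C++ model of that semantics (illustration only, not the intrinsic's implementation):

#include <array>
#include <cstdint>

// Model of <16 x i1> llvm.get.active.lane.mask(Base, Trip): lane I is
// active iff Base + I < Trip, so the masked load above touches exactly
// the remaining (blockSize & 15) elements and nothing past them.
std::array<bool, 16> activeLaneMask(uint32_t Base, uint32_t Trip) {
  std::array<bool, 16> Mask{};
  for (uint32_t I = 0; I < 16; ++I)
    Mask[I] = Base + I < Trip;
  return Mask;
}
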
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
index 4f52227..02e05b2 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
@@ -527,23 +527,14 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
ret void
}
-; TODO: We want to generate this code:
-; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
-; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
-; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
-; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4)
-; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
-; store <16 x i8> %bitcast_, ptr %gep_s0, align 1
-; ret void
-; }
-define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
-; CHECK-LABEL: define void @constant_stride_widen_no_reordering(
+define void @constant_stride_masked_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @constant_stride_masked_no_reordering(
; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 1, <28 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <28 x i8> poison)
-; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27>
-; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27>
+; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1
; CHECK-NEXT: ret void
;
%gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
@@ -618,6 +609,107 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps)
}
; TODO: We want to generate this code:
+; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) #0 {
+; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+; %1 = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 %gep_l0, i64 100, <4 x i1> splat (i1 true), i32 4)
+; %2 = bitcast <4 x i32> %1 to <16 x i8>
+; store <16 x i8> %2, ptr %gep_s0, align 1
+; ret void
+; }
+define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @constant_stride_widen_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_L4:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 100
+; CHECK-NEXT: [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 200
+; CHECK-NEXT: [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 300
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 1
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 100
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 101
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 102
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 103
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 200
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 201
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 202
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 203
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 300
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 301
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 302
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 303
+
+ %load0 = load i8, ptr %gep_l0 , align 1
+ %load1 = load i8, ptr %gep_l1 , align 1
+ %load2 = load i8, ptr %gep_l2 , align 1
+ %load3 = load i8, ptr %gep_l3 , align 1
+ %load4 = load i8, ptr %gep_l4 , align 1
+ %load5 = load i8, ptr %gep_l5 , align 1
+ %load6 = load i8, ptr %gep_l6 , align 1
+ %load7 = load i8, ptr %gep_l7 , align 1
+ %load8 = load i8, ptr %gep_l8 , align 1
+ %load9 = load i8, ptr %gep_l9 , align 1
+ %load10 = load i8, ptr %gep_l10, align 1
+ %load11 = load i8, ptr %gep_l11, align 1
+ %load12 = load i8, ptr %gep_l12, align 1
+ %load13 = load i8, ptr %gep_l13, align 1
+ %load14 = load i8, ptr %gep_l14, align 1
+ %load15 = load i8, ptr %gep_l15, align 1
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 1
+ store i8 %load1, ptr %gep_s1, align 1
+ store i8 %load2, ptr %gep_s2, align 1
+ store i8 %load3, ptr %gep_s3, align 1
+ store i8 %load4, ptr %gep_s4, align 1
+ store i8 %load5, ptr %gep_s5, align 1
+ store i8 %load6, ptr %gep_s6, align 1
+ store i8 %load7, ptr %gep_s7, align 1
+ store i8 %load8, ptr %gep_s8, align 1
+ store i8 %load9, ptr %gep_s9, align 1
+ store i8 %load10, ptr %gep_s10, align 1
+ store i8 %load11, ptr %gep_s11, align 1
+ store i8 %load12, ptr %gep_s12, align 1
+ store i8 %load13, ptr %gep_s13, align 1
+ store i8 %load14, ptr %gep_s14, align 1
+ store i8 %load15, ptr %gep_s15, align 1
+
+ ret void
+}
+; TODO: We want to generate this code:
; define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
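
Both TODO comments above want SLP to emit llvm.experimental.vp.strided.load, which reads EVL elements whose addresses step by a byte stride (100 in the constant-stride case, matching the four i8 groups at offsets 0/100/200/300). A scalar C++ model of the intrinsic's addressing, as a sketch:

#include <cstdint>
#include <cstring>
#include <vector>

// Scalar model of llvm.experimental.vp.strided.load.v4i32: element I is
// read from Base + I * StrideBytes. With an all-true mask and EVL = 4,
// one strided load replaces the sixteen scalar i8 loads in the test.
std::vector<uint32_t> stridedLoadI32(const uint8_t *Base,
                                     int64_t StrideBytes, unsigned EVL) {
  std::vector<uint32_t> Out(EVL);
  for (unsigned I = 0; I < EVL; ++I)
    std::memcpy(&Out[I], Base + I * StrideBytes, sizeof(uint32_t));
  return Out;
}
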
diff --git a/llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll b/llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll
index d34ac2b..85c8ed2 100644
--- a/llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll
+++ b/llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll
@@ -424,6 +424,174 @@ join:
ret ptr %phi
}
+define void @hoist_captures_same(i1 %c, ptr %x, ptr %y) {
+; CHECK-LABEL: @hoist_captures_same(
+; CHECK-NEXT: if:
+; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META9:![0-9]+]]
+; CHECK-NEXT: ret void
+;
+if:
+ br i1 %c, label %then, label %else
+
+then:
+ store ptr %x, ptr %y, !captures !{!"address"}
+ br label %out
+
+else:
+ store ptr %x, ptr %y, !captures !{!"address"}
+ br label %out
+
+out:
+ ret void
+}
+
+define void @hoist_captures_different(i1 %c, ptr %x, ptr %y) {
+; CHECK-LABEL: @hoist_captures_different(
+; CHECK-NEXT: if:
+; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META10:![0-9]+]]
+; CHECK-NEXT: ret void
+;
+if:
+ br i1 %c, label %then, label %else
+
+then:
+ store ptr %x, ptr %y, !captures !{!"address"}
+ br label %out
+
+else:
+ store ptr %x, ptr %y, !captures !{!"read_provenance"}
+ br label %out
+
+out:
+ ret void
+}
+
+define void @hoist_captures_overlap(i1 %c, ptr %x, ptr %y) {
+; CHECK-LABEL: @hoist_captures_overlap(
+; CHECK-NEXT: if:
+; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META10]]
+; CHECK-NEXT: ret void
+;
+if:
+ br i1 %c, label %then, label %else
+
+then:
+ store ptr %x, ptr %y, !captures !{!"address"}
+ br label %out
+
+else:
+ store ptr %x, ptr %y, !captures !{!"address", !"read_provenance"}
+ br label %out
+
+out:
+ ret void
+}
+
+define void @hoist_captures_subsume1(i1 %c, ptr %x, ptr %y) {
+; CHECK-LABEL: @hoist_captures_subsume1(
+; CHECK-NEXT: if:
+; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META9]]
+; CHECK-NEXT: ret void
+;
+if:
+ br i1 %c, label %then, label %else
+
+then:
+ store ptr %x, ptr %y, !captures !{!"address_is_null"}
+ br label %out
+
+else:
+ store ptr %x, ptr %y, !captures !{!"address"}
+ br label %out
+
+out:
+ ret void
+}
+
+define void @hoist_captures_subsume2(i1 %c, ptr %x, ptr %y) {
+; CHECK-LABEL: @hoist_captures_subsume2(
+; CHECK-NEXT: if:
+; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META11:![0-9]+]]
+; CHECK-NEXT: ret void
+;
+if:
+ br i1 %c, label %then, label %else
+
+then:
+ store ptr %x, ptr %y, !captures !{!"provenance"}
+ br label %out
+
+else:
+ store ptr %x, ptr %y, !captures !{!"read_provenance"}
+ br label %out
+
+out:
+ ret void
+}
+
+define void @hoist_captures_full_set(i1 %c, ptr %x, ptr %y) {
+; CHECK-LABEL: @hoist_captures_full_set(
+; CHECK-NEXT: if:
+; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8
+; CHECK-NEXT: ret void
+;
+if:
+ br i1 %c, label %then, label %else
+
+then:
+ store ptr %x, ptr %y, !captures !{!"address"}
+ br label %out
+
+else:
+ store ptr %x, ptr %y, !captures !{!"provenance"}
+ br label %out
+
+out:
+ ret void
+}
+
+define void @hoist_captures_only_one1(i1 %c, ptr %x, ptr %y) {
+; CHECK-LABEL: @hoist_captures_only_one1(
+; CHECK-NEXT: if:
+; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8
+; CHECK-NEXT: ret void
+;
+if:
+ br i1 %c, label %then, label %else
+
+then:
+ store ptr %x, ptr %y, !captures !{!"address"}
+ br label %out
+
+else:
+ store ptr %x, ptr %y
+ br label %out
+
+out:
+ ret void
+}
+
+define void @hoist_captures_only_one2(i1 %c, ptr %x, ptr %y) {
+; CHECK-LABEL: @hoist_captures_only_one2(
+; CHECK-NEXT: if:
+; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8
+; CHECK-NEXT: ret void
+;
+if:
+ br i1 %c, label %then, label %else
+
+then:
+ store ptr %x, ptr %y
+ br label %out
+
+else:
+ store ptr %x, ptr %y, !captures !{!"address"}
+ br label %out
+
+out:
+ ret void
+}
+
!0 = !{ i8 0, i8 1 }
!1 = !{ i8 3, i8 5 }
!2 = !{}
@@ -445,4 +613,7 @@ join:
; CHECK: [[META6]] = !{float 2.500000e+00}
; CHECK: [[META7]] = !{i32 5, i32 6}
; CHECK: [[META8]] = !{i32 4, i32 5}
+; CHECK: [[META9]] = !{!"address"}
+; CHECK: [[META10]] = !{!"address", !"read_provenance"}
+; CHECK: [[META11]] = !{!"provenance"}
;.
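
The new SimplifyCFG tests above pin down how !captures merges when two stores are hoisted: the result is the union of the capture sets ("address" plus "read_provenance" keeps both, META10), a component absorbs anything it subsumes ("address" subsumes "address_is_null", META9; "provenance" subsumes "read_provenance", META11), a union covering all components is equivalent to no annotation, and if either store lacks the metadata it must be dropped. A minimal bitmask sketch of that union semantics — the bit layout is illustrative, not LLVM's actual CaptureComponents encoding:

#include <cstdint>

// Illustrative capture-component lattice: a component's bits include
// those of everything it subsumes, so plain bitwise-or reproduces the
// subsumption results the tests check.
enum Captures : uint8_t {
  AddressIsNull = 1 << 0,
  Address = (1 << 1) | AddressIsNull,      // subsumes address_is_null
  ReadProvenance = 1 << 2,
  Provenance = (1 << 3) | ReadProvenance,  // subsumes read_provenance
  All = Address | Provenance,
};

// Union of both branches' capture sets; if the result is All (or one
// side carried no !captures), the hoisted store gets no metadata.
uint8_t mergeCaptures(uint8_t A, uint8_t B) { return A | B; }
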
diff --git a/llvm/test/Unit/CMakeLists.txt b/llvm/test/Unit/CMakeLists.txt
new file mode 100644
index 0000000..6b0abe1
--- /dev/null
+++ b/llvm/test/Unit/CMakeLists.txt
@@ -0,0 +1,5 @@
+add_lit_testsuite(check-llvm-unit "Running lit suite for LLVM unit tests"
+ ${CMAKE_CURRENT_BINARY_DIR}
+ EXCLUDE_FROM_CHECK_ALL
+ DEPENDS UnitTests
+ )
diff --git a/llvm/test/Verifier/captures-metadata.ll b/llvm/test/Verifier/captures-metadata.ll
new file mode 100644
index 0000000..ae08ddd
--- /dev/null
+++ b/llvm/test/Verifier/captures-metadata.ll
@@ -0,0 +1,37 @@
+; RUN: not opt -passes=verify < %s 2>&1 | FileCheck %s
+
+; CHECK: !captures metadata can only be applied to store instructions
+define void @wrong_instr_type(ptr %x) {
+ load ptr, ptr %x, !captures !{!"address"}
+ ret void
+}
+
+; CHECK: !captures metadata can only be applied to store with value operand of pointer type
+define void @wrong_op_type(i32 %x, ptr %y) {
+ store i32 %x, ptr %y, !captures !{!"address"}
+ ret void
+}
+
+; CHECK: !captures metadata cannot be empty
+define void @empty(ptr %x, ptr %y) {
+ store ptr %x, ptr %y, !captures !{}
+ ret void
+}
+
+; CHECK: !captures metadata must be a list of strings
+define void @not_string(ptr %x, ptr %y) {
+ store ptr %x, ptr %y, !captures !{!{}}
+ ret void
+}
+
+; CHECK: invalid entry in !captures metadata
+define void @invalid_str(ptr %x, ptr %y) {
+ store ptr %x, ptr %y, !captures !{!"foo"}
+ ret void
+}
+
+; CHECK: invalid entry in !captures metadata
+define void @invalid_none(ptr %x, ptr %y) {
+ store ptr %x, ptr %y, !captures !{!"none"}
+ ret void
+}
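
Taken together, the verifier test above encodes the rules for well-formed !captures: it may appear only on stores of pointer values, must be a non-empty list of strings, and each string must name a known capture component; "none" is rejected because a non-capturing store should simply omit the metadata. A sketch of that validation logic (the real checks live in the IR verifier):

#include <string>
#include <vector>

// Sketch of the structural checks exercised above: non-empty list of
// known component names only. Both "foo" and "none" fall through to the
// "invalid entry" rejection.
bool isValidCapturesList(const std::vector<std::string> &Entries) {
  if (Entries.empty())
    return false;
  for (const std::string &E : Entries)
    if (E != "address" && E != "address_is_null" && E != "provenance" &&
        E != "read_provenance")
      return false;
  return true;
}
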
diff --git a/llvm/test/tools/llvm-ir2vec/entities.ll b/llvm/test/tools/llvm-ir2vec/entities.ll
index 4b51adf..8dbce57 100644
--- a/llvm/test/tools/llvm-ir2vec/entities.ll
+++ b/llvm/test/tools/llvm-ir2vec/entities.ll
@@ -1,6 +1,6 @@
; RUN: llvm-ir2vec entities | FileCheck %s
-CHECK: 84
+CHECK: 110
CHECK-NEXT: Ret 0
CHECK-NEXT: Br 1
CHECK-NEXT: Switch 2
@@ -85,3 +85,29 @@ CHECK-NEXT: Function 80
CHECK-NEXT: Pointer 81
CHECK-NEXT: Constant 82
CHECK-NEXT: Variable 83
+CHECK-NEXT: FCMP_false 84
+CHECK-NEXT: FCMP_oeq 85
+CHECK-NEXT: FCMP_ogt 86
+CHECK-NEXT: FCMP_oge 87
+CHECK-NEXT: FCMP_olt 88
+CHECK-NEXT: FCMP_ole 89
+CHECK-NEXT: FCMP_one 90
+CHECK-NEXT: FCMP_ord 91
+CHECK-NEXT: FCMP_uno 92
+CHECK-NEXT: FCMP_ueq 93
+CHECK-NEXT: FCMP_ugt 94
+CHECK-NEXT: FCMP_uge 95
+CHECK-NEXT: FCMP_ult 96
+CHECK-NEXT: FCMP_ule 97
+CHECK-NEXT: FCMP_une 98
+CHECK-NEXT: FCMP_true 99
+CHECK-NEXT: ICMP_eq 100
+CHECK-NEXT: ICMP_ne 101
+CHECK-NEXT: ICMP_ugt 102
+CHECK-NEXT: ICMP_uge 103
+CHECK-NEXT: ICMP_ult 104
+CHECK-NEXT: ICMP_ule 105
+CHECK-NEXT: ICMP_sgt 106
+CHECK-NEXT: ICMP_sge 107
+CHECK-NEXT: ICMP_slt 108
+CHECK-NEXT: ICMP_sle 109
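
The entity count moves from 84 to 110 because the IR2Vec vocabulary gains one slot per comparison predicate: 16 FCMP predicates (FCMP_false through FCMP_true) plus 10 ICMP predicates (ICMP_eq through ICMP_sle). A one-line check of the flat numbering implied by the CHECK lines:

// Base entities occupy slots 0..83; FCMP predicates take 84..99 and
// ICMP predicates 100..109, giving the new total printed first.
constexpr unsigned NumBaseEntities = 84;
constexpr unsigned NumFCmp = 16; // FCMP_false .. FCMP_true
constexpr unsigned NumICmp = 10; // ICMP_eq .. ICMP_sle
static_assert(NumBaseEntities + NumFCmp + NumICmp == 110, "entity count");
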
diff --git a/llvm/tools/llvm-cgdata/llvm-cgdata.cpp b/llvm/tools/llvm-cgdata/llvm-cgdata.cpp
index 047557e..ea89c4d 100644
--- a/llvm/tools/llvm-cgdata/llvm-cgdata.cpp
+++ b/llvm/tools/llvm-cgdata/llvm-cgdata.cpp
@@ -83,7 +83,9 @@ static CGDataAction Action;
static std::optional<CGDataFormat> OutputFormat;
static std::vector<std::string> InputFilenames;
+namespace llvm {
extern cl::opt<bool> IndexedCodeGenDataLazyLoading;
+} // end namespace llvm
static void exitWithError(Twine Message, StringRef Whence = "",
StringRef Hint = "") {
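
The llvm-cgdata fix above (and the matching unittest hunks later in this diff) all address the same C++ linkage pitfall: these cl::opt globals are defined inside namespace llvm, so an extern declaration at global scope declares a different entity (::X rather than llvm::X) and uses of it fail to link; the redeclaration must reopen the namespace. A minimal sketch with hypothetical names:

namespace mylib {
int OptionDefinedInMylib = 42; // the definition lives in the namespace
} // end namespace mylib

// Correct redeclaration: reopen the namespace so this refers to
// mylib::OptionDefinedInMylib.
namespace mylib {
extern int OptionDefinedInMylib;
} // end namespace mylib

// Wrong: `extern int OptionDefinedInMylib;` at global scope would
// declare an unrelated ::OptionDefinedInMylib, and any use of that name
// would fail to link against the definition above.

int readOption() { return mylib::OptionDefinedInMylib; }
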
diff --git a/llvm/tools/llvm-config/llvm-config.cpp b/llvm/tools/llvm-config/llvm-config.cpp
index 49df8fd..7f8c55a 100644
--- a/llvm/tools/llvm-config/llvm-config.cpp
+++ b/llvm/tools/llvm-config/llvm-config.cpp
@@ -357,18 +357,18 @@ int main(int argc, char **argv) {
ActivePrefix = CurrentExecPrefix;
{
SmallString<256> Path(LLVM_INSTALL_INCLUDEDIR);
- sys::fs::make_absolute(ActivePrefix, Path);
+ sys::path::make_absolute(ActivePrefix, Path);
ActiveIncludeDir = std::string(Path);
}
{
SmallString<256> Path(LLVM_TOOLS_INSTALL_DIR);
- sys::fs::make_absolute(ActivePrefix, Path);
+ sys::path::make_absolute(ActivePrefix, Path);
ActiveBinDir = std::string(Path);
}
ActiveLibDir = ActivePrefix + "/lib" + LLVM_LIBDIR_SUFFIX;
{
SmallString<256> Path(LLVM_INSTALL_PACKAGE_DIR);
- sys::fs::make_absolute(ActivePrefix, Path);
+ sys::path::make_absolute(ActivePrefix, Path);
ActiveCMakeDir = std::string(Path);
}
ActiveIncludeOption = "-I" + ActiveIncludeDir;
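
The sys::fs::make_absolute to sys::path::make_absolute renames here and in the following tools reflect that the two-argument overload is pure string manipulation — it prepends the given base directory to a relative path without touching the filesystem — so it belongs in sys::path. Call sites are otherwise unchanged; a usage sketch based on the calls shown in these hunks:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"

// Make Rel absolute against CurDir by string composition alone; only the
// one-argument sys::fs variant consults the process's real CWD.
llvm::SmallString<128> makeAbsoluteIn(llvm::StringRef CurDir,
                                      llvm::StringRef Rel) {
  llvm::SmallString<128> Path(Rel);
  llvm::sys::path::make_absolute(CurDir, Path);
  return Path;
}
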
diff --git a/llvm/tools/llvm-dwp/llvm-dwp.cpp b/llvm/tools/llvm-dwp/llvm-dwp.cpp
index 61ba82d..31bad2d 100644
--- a/llvm/tools/llvm-dwp/llvm-dwp.cpp
+++ b/llvm/tools/llvm-dwp/llvm-dwp.cpp
@@ -94,7 +94,7 @@ getDWOFilenames(StringRef ExecFilename) {
dwarf::toString(Die.find(dwarf::DW_AT_comp_dir), "");
if (!DWOCompDir.empty()) {
SmallString<16> DWOPath(DWOName);
- sys::fs::make_absolute(DWOCompDir, DWOPath);
+ sys::path::make_absolute(DWOCompDir, DWOPath);
if (!sys::fs::exists(DWOPath) && sys::fs::exists(DWOName))
DWOPaths.push_back(std::move(DWOName));
else
diff --git a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
index aabebf0..434449c 100644
--- a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
+++ b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
@@ -162,8 +162,8 @@ public:
for (const BasicBlock &BB : F) {
for (const auto &I : BB.instructionsWithoutDebug()) {
- unsigned Opcode = Vocabulary::getSlotIndex(I.getOpcode());
- unsigned TypeID = Vocabulary::getSlotIndex(I.getType()->getTypeID());
+ unsigned Opcode = Vocabulary::getIndex(I.getOpcode());
+ unsigned TypeID = Vocabulary::getIndex(I.getType()->getTypeID());
// Add "Next" relationship with previous instruction
if (HasPrevOpcode) {
@@ -184,7 +184,7 @@ public:
// Add "Arg" relationships
unsigned ArgIndex = 0;
for (const Use &U : I.operands()) {
- unsigned OperandID = Vocabulary::getSlotIndex(*U);
+ unsigned OperandID = Vocabulary::getIndex(*U.get());
unsigned RelationID = ArgRelation + ArgIndex;
OS << Opcode << '\t' << OperandID << '\t' << RelationID << '\n';
diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp
index e09ddb4..731d648 100644
--- a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp
+++ b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp
@@ -1636,7 +1636,11 @@ static std::pair<Triple, SubtargetFeatures> getFirstFileTripleAndFeatures() {
case file_magic::macho_object: {
auto Obj = ExitOnErr(
object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef()));
- Triple TT = Obj->makeTriple();
+ Triple TT;
+ if (auto *MachOObj = dyn_cast<object::MachOObjectFile>(Obj.get()))
+ TT = MachOObj->getArchTriple();
+ else
+ TT = Obj->makeTriple();
if (Magic == file_magic::coff_object) {
// TODO: Move this to makeTriple() if possible.
TT.setObjectFormat(Triple::COFF);
diff --git a/llvm/tools/llvm-opt-report/OptReport.cpp b/llvm/tools/llvm-opt-report/OptReport.cpp
index 68ed92c..e4b4fc2 100644
--- a/llvm/tools/llvm-opt-report/OptReport.cpp
+++ b/llvm/tools/llvm-opt-report/OptReport.cpp
@@ -274,7 +274,7 @@ static bool writeReport(LocationInfoTy &LocationInfo) {
for (auto &FI : LocationInfo) {
SmallString<128> FileName(FI.first);
if (!InputRelDir.empty())
- sys::fs::make_absolute(InputRelDir, FileName);
+ sys::path::make_absolute(InputRelDir, FileName);
const auto &FileInfo = FI.second;
diff --git a/llvm/unittests/ADT/APFloatTest.cpp b/llvm/unittests/ADT/APFloatTest.cpp
index 141282e..30f0a8e5 100644
--- a/llvm/unittests/ADT/APFloatTest.cpp
+++ b/llvm/unittests/ADT/APFloatTest.cpp
@@ -10176,4 +10176,11 @@ TEST(APFloatTest, hasSignBitInMSB) {
EXPECT_FALSE(APFloat::hasSignBitInMSB(APFloat::Float8E8M0FNU()));
}
+TEST(APFloatTest, FrexpQuietSNaN) {
+ APFloat SNaN = APFloat::getSNaN(APFloat::PPCDoubleDouble());
+ int Exp;
+ APFloat Result = frexp(SNaN, Exp, APFloat::rmNearestTiesToEven);
+ EXPECT_FALSE(Result.isSignaling());
+}
+
} // namespace
diff --git a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
index dc6059d..b6e8567 100644
--- a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
+++ b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
@@ -43,8 +43,11 @@ class FunctionPropertiesAnalysisTest : public testing::Test {
public:
FunctionPropertiesAnalysisTest() {
auto VocabVector = ir2vec::Vocabulary::createDummyVocabForTest(1);
- MAM.registerPass([&] { return IR2VecVocabAnalysis(VocabVector); });
- IR2VecVocab = ir2vec::Vocabulary(std::move(VocabVector));
+ MAM.registerPass([VocabVector = std::move(VocabVector)]() mutable {
+ return IR2VecVocabAnalysis(std::move(VocabVector));
+ });
+ IR2VecVocab =
+ new ir2vec::Vocabulary(ir2vec::Vocabulary::createDummyVocabForTest(1));
MAM.registerPass([&] { return PassInstrumentationAnalysis(); });
FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
FAM.registerPass([&] { return DominatorTreeAnalysis(); });
@@ -66,7 +69,7 @@ protected:
std::unique_ptr<LoopInfo> LI;
FunctionAnalysisManager FAM;
ModuleAnalysisManager MAM;
- ir2vec::Vocabulary IR2VecVocab;
+ ir2vec::Vocabulary *IR2VecVocab;
void TearDown() override {
// Restore original IR2Vec weights
@@ -78,7 +81,7 @@ protected:
FunctionPropertiesInfo buildFPI(Function &F) {
// FunctionPropertiesInfo assumes IR2VecVocabAnalysis has been run to
// use IR2Vec.
- auto VocabResult = MAM.getResult<IR2VecVocabAnalysis>(*F.getParent());
+ auto &VocabResult = MAM.getResult<IR2VecVocabAnalysis>(*F.getParent());
(void)VocabResult;
return FunctionPropertiesInfo::getFunctionPropertiesInfo(F, FAM);
}
@@ -106,7 +109,7 @@ protected:
}
std::unique_ptr<ir2vec::Embedder> createEmbedder(const Function &F) {
- auto Emb = ir2vec::Embedder::create(IR2VecKind::Symbolic, F, IR2VecVocab);
+ auto Emb = ir2vec::Embedder::create(IR2VecKind::Symbolic, F, *IR2VecVocab);
EXPECT_TRUE(static_cast<bool>(Emb));
return Emb;
}
diff --git a/llvm/unittests/Analysis/IR2VecTest.cpp b/llvm/unittests/Analysis/IR2VecTest.cpp
index 9f2f6a3..743628f 100644
--- a/llvm/unittests/Analysis/IR2VecTest.cpp
+++ b/llvm/unittests/Analysis/IR2VecTest.cpp
@@ -295,7 +295,7 @@ TEST(IR2VecTest, ZeroDimensionEmbedding) {
// Fixture for IR2Vec tests requiring IR setup.
class IR2VecTestFixture : public ::testing::Test {
protected:
- Vocabulary V;
+ Vocabulary *V;
LLVMContext Ctx;
std::unique_ptr<Module> M;
Function *F = nullptr;
@@ -304,7 +304,7 @@ protected:
Instruction *RetInst = nullptr;
void SetUp() override {
- V = Vocabulary(Vocabulary::createDummyVocabForTest(2));
+ V = new Vocabulary(Vocabulary::createDummyVocabForTest(2));
// Setup IR
M = std::make_unique<Module>("TestM", Ctx);
@@ -322,7 +322,7 @@ protected:
};
TEST_F(IR2VecTestFixture, GetInstVecMap_Symbolic) {
- auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V);
+ auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V);
ASSERT_TRUE(static_cast<bool>(Emb));
const auto &InstMap = Emb->getInstVecMap();
@@ -341,7 +341,7 @@ TEST_F(IR2VecTestFixture, GetInstVecMap_Symbolic) {
}
TEST_F(IR2VecTestFixture, GetInstVecMap_FlowAware) {
- auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V);
+ auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V);
ASSERT_TRUE(static_cast<bool>(Emb));
const auto &InstMap = Emb->getInstVecMap();
@@ -358,7 +358,7 @@ TEST_F(IR2VecTestFixture, GetInstVecMap_FlowAware) {
}
TEST_F(IR2VecTestFixture, GetBBVecMap_Symbolic) {
- auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V);
+ auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V);
ASSERT_TRUE(static_cast<bool>(Emb));
const auto &BBMap = Emb->getBBVecMap();
@@ -373,7 +373,7 @@ TEST_F(IR2VecTestFixture, GetBBVecMap_Symbolic) {
}
TEST_F(IR2VecTestFixture, GetBBVecMap_FlowAware) {
- auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V);
+ auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V);
ASSERT_TRUE(static_cast<bool>(Emb));
const auto &BBMap = Emb->getBBVecMap();
@@ -388,7 +388,7 @@ TEST_F(IR2VecTestFixture, GetBBVecMap_FlowAware) {
}
TEST_F(IR2VecTestFixture, GetBBVector_Symbolic) {
- auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V);
+ auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V);
ASSERT_TRUE(static_cast<bool>(Emb));
const auto &BBVec = Emb->getBBVector(*BB);
@@ -398,7 +398,7 @@ TEST_F(IR2VecTestFixture, GetBBVector_Symbolic) {
}
TEST_F(IR2VecTestFixture, GetBBVector_FlowAware) {
- auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V);
+ auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V);
ASSERT_TRUE(static_cast<bool>(Emb));
const auto &BBVec = Emb->getBBVector(*BB);
@@ -408,7 +408,7 @@ TEST_F(IR2VecTestFixture, GetBBVector_FlowAware) {
}
TEST_F(IR2VecTestFixture, GetFunctionVector_Symbolic) {
- auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V);
+ auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V);
ASSERT_TRUE(static_cast<bool>(Emb));
const auto &FuncVec = Emb->getFunctionVector();
@@ -420,7 +420,7 @@ TEST_F(IR2VecTestFixture, GetFunctionVector_Symbolic) {
}
TEST_F(IR2VecTestFixture, GetFunctionVector_FlowAware) {
- auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V);
+ auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V);
ASSERT_TRUE(static_cast<bool>(Emb));
const auto &FuncVec = Emb->getFunctionVector();
@@ -435,6 +435,7 @@ static constexpr unsigned MaxOpcodes = Vocabulary::MaxOpcodes;
static constexpr unsigned MaxTypeIDs = Vocabulary::MaxTypeIDs;
static constexpr unsigned MaxCanonicalTypeIDs = Vocabulary::MaxCanonicalTypeIDs;
static constexpr unsigned MaxOperands = Vocabulary::MaxOperandKinds;
+static constexpr unsigned MaxPredicateKinds = Vocabulary::MaxPredicateKinds;
// Mapping between LLVM Type::TypeID tokens and Vocabulary::CanonicalTypeID
// names and their canonical string keys.
@@ -460,9 +461,13 @@ TEST(IR2VecVocabularyTest, DummyVocabTest) {
EXPECT_EQ(Emb.size(), Dim);
// Should have the correct total number of embeddings
- EXPECT_EQ(VocabVecSize, MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands);
+ EXPECT_EQ(VocabVecSize, MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands +
+ MaxPredicateKinds);
- auto ExpectedVocab = VocabVec;
+ // Collect embeddings for later comparison before moving VocabVec
+ std::vector<Embedding> ExpectedVocab;
+ for (const auto &Emb : VocabVec)
+ ExpectedVocab.push_back(Emb);
IR2VecVocabAnalysis VocabAnalysis(std::move(VocabVec));
LLVMContext TestCtx;
@@ -480,17 +485,17 @@ TEST(IR2VecVocabularyTest, DummyVocabTest) {
}
TEST(IR2VecVocabularyTest, SlotIdxMapping) {
- // Test getSlotIndex for Opcodes
+ // Test getIndex for Opcodes
#define EXPECT_OPCODE_SLOT(NUM, OPCODE, CLASS) \
- EXPECT_EQ(Vocabulary::getSlotIndex(NUM), static_cast<unsigned>(NUM - 1));
+ EXPECT_EQ(Vocabulary::getIndex(NUM), static_cast<unsigned>(NUM - 1));
#define HANDLE_INST(NUM, OPCODE, CLASS) EXPECT_OPCODE_SLOT(NUM, OPCODE, CLASS)
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
#undef EXPECT_OPCODE_SLOT
- // Test getSlotIndex for Types
+ // Test getIndex for Types
#define EXPECT_TYPE_SLOT(TypeIDTok, CanonEnum, CanonStr) \
- EXPECT_EQ(Vocabulary::getSlotIndex(Type::TypeIDTok), \
+ EXPECT_EQ(Vocabulary::getIndex(Type::TypeIDTok), \
MaxOpcodes + static_cast<unsigned>( \
Vocabulary::CanonicalTypeID::CanonEnum));
@@ -498,7 +503,7 @@ TEST(IR2VecVocabularyTest, SlotIdxMapping) {
#undef EXPECT_TYPE_SLOT
- // Test getSlotIndex for Value operands
+ // Test getIndex for Value operands
LLVMContext Ctx;
Module M("TestM", Ctx);
FunctionType *FTy =
@@ -508,40 +513,59 @@ TEST(IR2VecVocabularyTest, SlotIdxMapping) {
#define EXPECTED_VOCAB_OPERAND_SLOT(X) \
MaxOpcodes + MaxCanonicalTypeIDs + static_cast<unsigned>(X)
// Test Function operand
- EXPECT_EQ(Vocabulary::getSlotIndex(*F),
+ EXPECT_EQ(Vocabulary::getIndex(*F),
EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::FunctionID));
// Test Constant operand
Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
- EXPECT_EQ(Vocabulary::getSlotIndex(*C),
+ EXPECT_EQ(Vocabulary::getIndex(*C),
EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::ConstantID));
// Test Pointer operand
BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
AllocaInst *PtrVal = new AllocaInst(Type::getInt32Ty(Ctx), 0, "ptr", BB);
- EXPECT_EQ(Vocabulary::getSlotIndex(*PtrVal),
+ EXPECT_EQ(Vocabulary::getIndex(*PtrVal),
EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::PointerID));
// Test Variable operand (function argument)
Argument *Arg = F->getArg(0);
- EXPECT_EQ(Vocabulary::getSlotIndex(*Arg),
+ EXPECT_EQ(Vocabulary::getIndex(*Arg),
EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::VariableID));
#undef EXPECTED_VOCAB_OPERAND_SLOT
+
+ // Test getIndex for predicates
+#define EXPECTED_VOCAB_PREDICATE_SLOT(X) \
+ MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands + static_cast<unsigned>(X)
+ for (unsigned P = CmpInst::FIRST_FCMP_PREDICATE;
+ P <= CmpInst::LAST_FCMP_PREDICATE; ++P) {
+ CmpInst::Predicate Pred = static_cast<CmpInst::Predicate>(P);
+ unsigned ExpectedIdx =
+ EXPECTED_VOCAB_PREDICATE_SLOT((P - CmpInst::FIRST_FCMP_PREDICATE));
+ EXPECT_EQ(Vocabulary::getIndex(Pred), ExpectedIdx);
+ }
+ auto ICMP_Start = CmpInst::LAST_FCMP_PREDICATE + 1;
+ for (unsigned P = CmpInst::FIRST_ICMP_PREDICATE;
+ P <= CmpInst::LAST_ICMP_PREDICATE; ++P) {
+ CmpInst::Predicate Pred = static_cast<CmpInst::Predicate>(P);
+ unsigned ExpectedIdx = EXPECTED_VOCAB_PREDICATE_SLOT(
+ ICMP_Start + P - CmpInst::FIRST_ICMP_PREDICATE);
+ EXPECT_EQ(Vocabulary::getIndex(Pred), ExpectedIdx);
+ }
+#undef EXPECTED_VOCAB_PREDICATE_SLOT
}
#if GTEST_HAS_DEATH_TEST
#ifndef NDEBUG
TEST(IR2VecVocabularyTest, NumericIDMapInvalidInputs) {
// Test invalid opcode IDs
- EXPECT_DEATH(Vocabulary::getSlotIndex(0u), "Invalid opcode");
- EXPECT_DEATH(Vocabulary::getSlotIndex(MaxOpcodes + 1), "Invalid opcode");
+ EXPECT_DEATH(Vocabulary::getIndex(0u), "Invalid opcode");
+ EXPECT_DEATH(Vocabulary::getIndex(MaxOpcodes + 1), "Invalid opcode");
// Test invalid type IDs
- EXPECT_DEATH(Vocabulary::getSlotIndex(static_cast<Type::TypeID>(MaxTypeIDs)),
+ EXPECT_DEATH(Vocabulary::getIndex(static_cast<Type::TypeID>(MaxTypeIDs)),
+ "Invalid type ID");
+ EXPECT_DEATH(Vocabulary::getIndex(static_cast<Type::TypeID>(MaxTypeIDs + 10)),
"Invalid type ID");
- EXPECT_DEATH(
- Vocabulary::getSlotIndex(static_cast<Type::TypeID>(MaxTypeIDs + 10)),
- "Invalid type ID");
}
#endif // NDEBUG
#endif // GTEST_HAS_DEATH_TEST
@@ -551,7 +575,7 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) {
EXPECT_EQ(Vocabulary::getStringKey(12), "Add");
#define EXPECT_OPCODE(NUM, OPCODE, CLASS) \
- EXPECT_EQ(Vocabulary::getStringKey(Vocabulary::getSlotIndex(NUM)), \
+ EXPECT_EQ(Vocabulary::getStringKey(Vocabulary::getIndex(NUM)), \
Vocabulary::getVocabKeyForOpcode(NUM));
#define HANDLE_INST(NUM, OPCODE, CLASS) EXPECT_OPCODE(NUM, OPCODE, CLASS)
#include "llvm/IR/Instruction.def"
@@ -569,6 +593,7 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) {
#undef EXPECT_CANONICAL_TYPE_NAME
+ // Verify OperandKind -> string mapping
#define HANDLE_OPERAND_KINDS(X) \
X(FunctionID, "Function") \
X(PointerID, "Pointer") \
@@ -592,6 +617,28 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) {
Vocabulary::getStringKey(MaxOpcodes + MaxCanonicalTypeIDs + 1);
EXPECT_EQ(FuncArgKey, "Function");
EXPECT_EQ(PtrArgKey, "Pointer");
+
+// Verify PredicateKind -> string mapping
+#define EXPECT_PREDICATE_KIND(PredNum, PredPos, PredKind) \
+ do { \
+ std::string PredStr = \
+ std::string(PredKind) + "_" + \
+ CmpInst::getPredicateName(static_cast<CmpInst::Predicate>(PredNum)) \
+ .str(); \
+ unsigned Pos = MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands + PredPos; \
+ EXPECT_EQ(Vocabulary::getStringKey(Pos), PredStr); \
+ } while (0)
+
+ for (unsigned P = CmpInst::FIRST_FCMP_PREDICATE;
+ P <= CmpInst::LAST_FCMP_PREDICATE; ++P)
+ EXPECT_PREDICATE_KIND(P, P - CmpInst::FIRST_FCMP_PREDICATE, "FCMP");
+
+ auto ICMP_Pos = CmpInst::LAST_FCMP_PREDICATE + 1;
+ for (unsigned P = CmpInst::FIRST_ICMP_PREDICATE;
+ P <= CmpInst::LAST_ICMP_PREDICATE; ++P)
+ EXPECT_PREDICATE_KIND(P, ICMP_Pos++, "ICMP");
+
+#undef EXPECT_PREDICATE_KIND
}
TEST(IR2VecVocabularyTest, VocabularyDimensions) {
@@ -627,10 +674,12 @@ TEST(IR2VecVocabularyTest, InvalidAccess) {
#endif // GTEST_HAS_DEATH_TEST
TEST(IR2VecVocabularyTest, TypeIDStringKeyMapping) {
+ Vocabulary V = Vocabulary(Vocabulary::createDummyVocabForTest());
#define EXPECT_TYPE_TO_CANONICAL(TypeIDTok, CanonEnum, CanonStr) \
- EXPECT_EQ( \
- Vocabulary::getStringKey(Vocabulary::getSlotIndex(Type::TypeIDTok)), \
- CanonStr);
+ do { \
+ unsigned FlatIdx = V.getIndex(Type::TypeIDTok); \
+ EXPECT_EQ(Vocabulary::getStringKey(FlatIdx), CanonStr); \
+ } while (0);
IR2VEC_HANDLE_TYPE_BIMAP(EXPECT_TYPE_TO_CANONICAL)
@@ -638,14 +687,20 @@ TEST(IR2VecVocabularyTest, TypeIDStringKeyMapping) {
}
TEST(IR2VecVocabularyTest, InvalidVocabularyConstruction) {
- std::vector<Embedding> InvalidVocab;
- InvalidVocab.push_back(Embedding(2, 1.0));
- InvalidVocab.push_back(Embedding(2, 2.0));
-
- Vocabulary V(std::move(InvalidVocab));
+ // Test 1: Create invalid VocabStorage with insufficient sections
+ std::vector<std::vector<Embedding>> InvalidSectionData;
+ // Only add one section with 2 embeddings, but the vocabulary needs 4 sections
+ std::vector<Embedding> Section1;
+ Section1.push_back(Embedding(2, 1.0));
+ Section1.push_back(Embedding(2, 2.0));
+ InvalidSectionData.push_back(std::move(Section1));
+
+ VocabStorage InvalidStorage(std::move(InvalidSectionData));
+ Vocabulary V(std::move(InvalidStorage));
EXPECT_FALSE(V.isValid());
{
+ // Test 2: Default-constructed vocabulary should be invalid
Vocabulary InvalidResult;
EXPECT_FALSE(InvalidResult.isValid());
#if GTEST_HAS_DEATH_TEST
@@ -656,4 +711,265 @@ TEST(IR2VecVocabularyTest, InvalidVocabularyConstruction) {
}
}
+TEST(VocabStorageTest, DefaultConstructor) {
+ VocabStorage storage;
+
+ EXPECT_EQ(storage.size(), 0u);
+ EXPECT_EQ(storage.getNumSections(), 0u);
+ EXPECT_EQ(storage.getDimension(), 0u);
+ EXPECT_FALSE(storage.isValid());
+
+ // Test iterators on empty storage
+ EXPECT_EQ(storage.begin(), storage.end());
+}
+
+TEST(VocabStorageTest, BasicConstruction) {
+ // Create test data with 3 sections
+ std::vector<std::vector<Embedding>> sectionData;
+
+ // Section 0: 2 embeddings of dimension 3
+ std::vector<Embedding> section0;
+ section0.emplace_back(std::vector<double>{1.0, 2.0, 3.0});
+ section0.emplace_back(std::vector<double>{4.0, 5.0, 6.0});
+ sectionData.push_back(std::move(section0));
+
+ // Section 1: 1 embedding of dimension 3
+ std::vector<Embedding> section1;
+ section1.emplace_back(std::vector<double>{7.0, 8.0, 9.0});
+ sectionData.push_back(std::move(section1));
+
+ // Section 2: 3 embeddings of dimension 3
+ std::vector<Embedding> section2;
+ section2.emplace_back(std::vector<double>{10.0, 11.0, 12.0});
+ section2.emplace_back(std::vector<double>{13.0, 14.0, 15.0});
+ section2.emplace_back(std::vector<double>{16.0, 17.0, 18.0});
+ sectionData.push_back(std::move(section2));
+
+ VocabStorage storage(std::move(sectionData));
+
+ EXPECT_EQ(storage.size(), 6u); // Total: 2 + 1 + 3 = 6
+ EXPECT_EQ(storage.getNumSections(), 3u);
+ EXPECT_EQ(storage.getDimension(), 3u);
+ EXPECT_TRUE(storage.isValid());
+}
+
+TEST(VocabStorageTest, SectionAccess) {
+ // Create test data
+ std::vector<std::vector<Embedding>> sectionData;
+
+ std::vector<Embedding> section0;
+ section0.emplace_back(std::vector<double>{1.0, 2.0});
+ section0.emplace_back(std::vector<double>{3.0, 4.0});
+ sectionData.push_back(std::move(section0));
+
+ std::vector<Embedding> section1;
+ section1.emplace_back(std::vector<double>{5.0, 6.0});
+ sectionData.push_back(std::move(section1));
+
+ VocabStorage storage(std::move(sectionData));
+
+ // Test section access
+ EXPECT_EQ(storage[0].size(), 2u);
+ EXPECT_EQ(storage[1].size(), 1u);
+
+ // Test embedding values
+ EXPECT_THAT(storage[0][0].getData(), ElementsAre(1.0, 2.0));
+ EXPECT_THAT(storage[0][1].getData(), ElementsAre(3.0, 4.0));
+ EXPECT_THAT(storage[1][0].getData(), ElementsAre(5.0, 6.0));
+}
+
+#if GTEST_HAS_DEATH_TEST
+#ifndef NDEBUG
+TEST(VocabStorageTest, InvalidSectionAccess) {
+ std::vector<std::vector<Embedding>> sectionData;
+ std::vector<Embedding> section0;
+ section0.emplace_back(std::vector<double>{1.0, 2.0});
+ sectionData.push_back(std::move(section0));
+
+ VocabStorage storage(std::move(sectionData));
+
+ EXPECT_DEATH(storage[1], "Invalid section ID");
+ EXPECT_DEATH(storage[10], "Invalid section ID");
+}
+
+TEST(VocabStorageTest, EmptySection) {
+ std::vector<std::vector<Embedding>> sectionData;
+ std::vector<Embedding> emptySection; // Empty section
+ sectionData.push_back(std::move(emptySection));
+
+ std::vector<Embedding> validSection;
+ validSection.emplace_back(std::vector<double>{1.0});
+ sectionData.push_back(std::move(validSection));
+
+ EXPECT_DEATH(VocabStorage(std::move(sectionData)),
+ "Vocabulary section is empty");
+}
+
+TEST(VocabStorageTest, EmptyMiddleSection) {
+ std::vector<std::vector<Embedding>> sectionData;
+
+ // Valid first section
+ std::vector<Embedding> validSection1;
+ validSection1.emplace_back(std::vector<double>{1.0});
+ sectionData.push_back(std::move(validSection1));
+
+ // Empty middle section
+ std::vector<Embedding> emptySection;
+ sectionData.push_back(std::move(emptySection));
+
+ // Valid last section
+ std::vector<Embedding> validSection2;
+ validSection2.emplace_back(std::vector<double>{2.0});
+ sectionData.push_back(std::move(validSection2));
+
+ EXPECT_DEATH(VocabStorage(std::move(sectionData)),
+ "Vocabulary section is empty");
+}
+
+TEST(VocabStorageTest, NoSections) {
+ std::vector<std::vector<Embedding>> sectionData; // No sections
+
+ EXPECT_DEATH(VocabStorage(std::move(sectionData)),
+ "Vocabulary has no sections");
+}
+
+TEST(VocabStorageTest, MismatchedDimensionsAcrossSections) {
+ std::vector<std::vector<Embedding>> sectionData;
+
+ // Section 0: embeddings with dimension 2
+ std::vector<Embedding> section0;
+ section0.emplace_back(std::vector<double>{1.0, 2.0});
+ section0.emplace_back(std::vector<double>{3.0, 4.0});
+ sectionData.push_back(std::move(section0));
+
+ // Section 1: embedding with dimension 3 (mismatch!)
+ std::vector<Embedding> section1;
+ section1.emplace_back(std::vector<double>{5.0, 6.0, 7.0});
+ sectionData.push_back(std::move(section1));
+
+ EXPECT_DEATH(VocabStorage(std::move(sectionData)),
+ "All embeddings must have the same dimension");
+}
+
+TEST(VocabStorageTest, MismatchedDimensionsWithinSection) {
+ std::vector<std::vector<Embedding>> sectionData;
+
+ // Section 0: first embedding with dimension 2, second with dimension 3
+ std::vector<Embedding> section0;
+ section0.emplace_back(std::vector<double>{1.0, 2.0});
+ section0.emplace_back(std::vector<double>{3.0, 4.0, 5.0}); // Mismatch!
+ sectionData.push_back(std::move(section0));
+
+ EXPECT_DEATH(VocabStorage(std::move(sectionData)),
+ "All embeddings must have the same dimension");
+}
+#endif // NDEBUG
+#endif // GTEST_HAS_DEATH_TEST
+
+TEST(VocabStorageTest, IteratorBasics) {
+ std::vector<std::vector<Embedding>> sectionData;
+
+ std::vector<Embedding> section0;
+ section0.emplace_back(std::vector<double>{1.0, 2.0});
+ section0.emplace_back(std::vector<double>{3.0, 4.0});
+ sectionData.push_back(std::move(section0));
+
+ std::vector<Embedding> section1;
+ section1.emplace_back(std::vector<double>{5.0, 6.0});
+ sectionData.push_back(std::move(section1));
+
+ VocabStorage storage(std::move(sectionData));
+
+ // Test iterator basics
+ auto it = storage.begin();
+ auto end = storage.end();
+
+ EXPECT_NE(it, end);
+
+ // Check first embedding
+ EXPECT_THAT((*it).getData(), ElementsAre(1.0, 2.0));
+
+ // Advance to second embedding
+ ++it;
+ EXPECT_NE(it, end);
+ EXPECT_THAT((*it).getData(), ElementsAre(3.0, 4.0));
+
+ // Advance to third embedding (in section 1)
+ ++it;
+ EXPECT_NE(it, end);
+ EXPECT_THAT((*it).getData(), ElementsAre(5.0, 6.0));
+
+ // Advance past the end
+ ++it;
+ EXPECT_EQ(it, end);
+}
+
+TEST(VocabStorageTest, IteratorTraversal) {
+ std::vector<std::vector<Embedding>> sectionData;
+
+ // Section 0: 2 embeddings
+ std::vector<Embedding> section0;
+ section0.emplace_back(std::vector<double>{10.0});
+ section0.emplace_back(std::vector<double>{20.0});
+ sectionData.push_back(std::move(section0));
+
+ // Section 1: 1 embedding
+ std::vector<Embedding> section1;
+ section1.emplace_back(std::vector<double>{25.0});
+ sectionData.push_back(std::move(section1));
+
+ // Section 2: 3 embeddings
+ std::vector<Embedding> section2;
+ section2.emplace_back(std::vector<double>{30.0});
+ section2.emplace_back(std::vector<double>{40.0});
+ section2.emplace_back(std::vector<double>{50.0});
+ sectionData.push_back(std::move(section2));
+
+ VocabStorage storage(std::move(sectionData));
+
+ // Collect all values using iterator
+ std::vector<double> values;
+ for (const auto &emb : storage) {
+ EXPECT_EQ(emb.size(), 1u);
+ values.push_back(emb[0]);
+ }
+
+ // Should get all embeddings from all sections
+ EXPECT_THAT(values, ElementsAre(10.0, 20.0, 25.0, 30.0, 40.0, 50.0));
+}
+
+TEST(VocabStorageTest, IteratorComparison) {
+ std::vector<std::vector<Embedding>> sectionData;
+ std::vector<Embedding> section0;
+ section0.emplace_back(std::vector<double>{1.0});
+ section0.emplace_back(std::vector<double>{2.0});
+ sectionData.push_back(std::move(section0));
+
+ VocabStorage storage(std::move(sectionData));
+
+ auto it1 = storage.begin();
+ auto it2 = storage.begin();
+ auto end = storage.end();
+
+ // Test equality
+ EXPECT_EQ(it1, it2);
+ EXPECT_NE(it1, end);
+
+ // Advance one iterator
+ ++it1;
+ EXPECT_NE(it1, it2);
+ EXPECT_NE(it1, end);
+
+ // Advance second iterator to match
+ ++it2;
+ EXPECT_EQ(it1, it2);
+
+ // Advance both to end
+ ++it1;
+ ++it2;
+ EXPECT_EQ(it1, end);
+ EXPECT_EQ(it2, end);
+ EXPECT_EQ(it1, it2);
+}
+
} // end anonymous namespace
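
The new VocabStorage tests above encode its contract: embeddings live in ordered sections (opcodes, canonical types, operand kinds, and now predicates), every section is non-empty, all embeddings share one dimension, and iteration walks the sections in order. A flat index is therefore a section base plus an offset, mirroring the EXPECTED_VOCAB_PREDICATE_SLOT arithmetic earlier in this file; a sketch:

#include <cstddef>
#include <vector>

// Flat index = sizes of all earlier sections + position within the
// section, exactly how the tests compute predicate slots:
// MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds + PredPos.
size_t flatIndex(const std::vector<size_t> &SectionSizes, size_t Section,
                 size_t Pos) {
  size_t Base = 0;
  for (size_t S = 0; S < Section; ++S)
    Base += SectionSizes[S];
  return Base + Pos;
}
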
diff --git a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
index 8c4fd8b..d8457a3 100644
--- a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
+++ b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
@@ -24,7 +24,9 @@
using namespace llvm;
using namespace llvm::memprof;
+namespace llvm {
LLVM_ABI extern cl::opt<bool> MemProfKeepAllNotColdContexts;
+} // end namespace llvm
namespace {
@@ -228,8 +230,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
CallBase *Call = findCall(*Func, "call");
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_TRUE(Call->hasFnAttr("memprof"));
- EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
+ EXPECT_FALSE(Call->hasFnAttr("memprof"));
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
@@ -278,8 +279,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
CallBase *Call = findCall(*Func, "call");
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_TRUE(Call->hasFnAttr("memprof"));
- EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
+ EXPECT_FALSE(Call->hasFnAttr("memprof"));
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
@@ -333,8 +333,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
CallBase *Call = findCall(*Func, "call");
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_TRUE(Call->hasFnAttr("memprof"));
- EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
+ EXPECT_FALSE(Call->hasFnAttr("memprof"));
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
@@ -393,8 +392,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
CallBase *Call = findCall(*Func, "call");
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_TRUE(Call->hasFnAttr("memprof"));
- EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
+ EXPECT_FALSE(Call->hasFnAttr("memprof"));
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
@@ -465,8 +463,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
ASSERT_NE(Call, nullptr);
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_TRUE(Call->hasFnAttr("memprof"));
- EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
+ EXPECT_FALSE(Call->hasFnAttr("memprof"));
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
EXPECT_THAT(MemProfMD, MemprofMetadataEquals(ExpectedVals));
@@ -539,8 +536,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
// Restore original option value.
MemProfKeepAllNotColdContexts = OrigMemProfKeepAllNotColdContexts;
- EXPECT_TRUE(Call->hasFnAttr("memprof"));
- EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
+ EXPECT_FALSE(Call->hasFnAttr("memprof"));
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
EXPECT_THAT(MemProfMD, MemprofMetadataEquals(ExpectedVals));
@@ -668,8 +664,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
// The hot allocations will be converted to NotCold and pruned as they
// are unnecessary to determine how to clone the cold allocation.
- EXPECT_TRUE(Call->hasFnAttr("memprof"));
- EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
+ EXPECT_FALSE(Call->hasFnAttr("memprof"));
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
diff --git a/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp b/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp
index 45dc50e..c8752c7 100644
--- a/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp
+++ b/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp
@@ -25,9 +25,10 @@
#include "llvm/Support/raw_ostream.h"
#include "gtest/gtest.h"
-LLVM_ABI extern llvm::cl::opt<bool> ScalePartialSampleProfileWorkingSetSize;
-
namespace llvm {
+
+LLVM_ABI extern cl::opt<bool> ScalePartialSampleProfileWorkingSetSize;
+
namespace {
class ProfileSummaryInfoTest : public testing::Test {
diff --git a/llvm/unittests/CodeGen/RegAllocScoreTest.cpp b/llvm/unittests/CodeGen/RegAllocScoreTest.cpp
index 86bfc7a..432dc93 100644
--- a/llvm/unittests/CodeGen/RegAllocScoreTest.cpp
+++ b/llvm/unittests/CodeGen/RegAllocScoreTest.cpp
@@ -31,11 +31,14 @@
#include "gtest/gtest.h"
using namespace llvm;
+
+namespace llvm {
LLVM_ABI extern cl::opt<double> CopyWeight;
LLVM_ABI extern cl::opt<double> LoadWeight;
LLVM_ABI extern cl::opt<double> StoreWeight;
LLVM_ABI extern cl::opt<double> CheapRematWeight;
LLVM_ABI extern cl::opt<double> ExpensiveRematWeight;
+} // namespace llvm
namespace {
// Include helper functions to ease the manipulation of MachineFunctions.
diff --git a/llvm/unittests/Frontend/CMakeLists.txt b/llvm/unittests/Frontend/CMakeLists.txt
index 836a844..1ce34e7 100644
--- a/llvm/unittests/Frontend/CMakeLists.txt
+++ b/llvm/unittests/Frontend/CMakeLists.txt
@@ -1,5 +1,6 @@
set(LLVM_LINK_COMPONENTS
Analysis
+ BinaryFormat
Core
FrontendHLSL
FrontendOffloading
diff --git a/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp b/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp
index 1eb03f1..451c376 100644
--- a/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp
+++ b/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp
@@ -266,7 +266,8 @@ TEST(HLSLRootSignatureTest, DefaultStaticSamplerDump) {
"minLOD = 0.000000e+00, "
"maxLOD = 3.402823e+38, "
"space = 0, "
- "visibility = All"
+ "visibility = All, "
+ "flags = None"
")";
EXPECT_EQ(Out, Expected);
}
@@ -287,6 +288,7 @@ TEST(HLSLRootSignatureTest, DefinedStaticSamplerDump) {
Sampler.MaxLOD = 32.0f;
Sampler.Space = 7;
Sampler.Visibility = llvm::dxbc::ShaderVisibility::Domain;
+ Sampler.Flags = llvm::dxbc::StaticSamplerFlags::NonNormalizedCoordinates;
std::string Out;
llvm::raw_string_ostream OS(Out);
@@ -305,7 +307,8 @@ TEST(HLSLRootSignatureTest, DefinedStaticSamplerDump) {
"minLOD = 1.000000e+00, "
"maxLOD = 3.200000e+01, "
"space = 7, "
- "visibility = Domain"
+ "visibility = Domain, "
+ "flags = NonNormalizedCoordinates"
")";
EXPECT_EQ(Out, Expected);
}
diff --git a/llvm/unittests/Object/ELFTest.cpp b/llvm/unittests/Object/ELFTest.cpp
index faf855c..7c68ab5 100644
--- a/llvm/unittests/Object/ELFTest.cpp
+++ b/llvm/unittests/Object/ELFTest.cpp
@@ -7,6 +7,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/Object/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/ObjectYAML/yaml2obj.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/YAMLTraits.h"
#include "llvm/Testing/Support/Error.h"
#include "gtest/gtest.h"
@@ -310,3 +314,71 @@ TEST(ELFTest, Hash) {
// presuming 32-bit long. Thus make sure that extra bit doesn't appear.
EXPECT_EQ(hashSysV("ZZZZZW9p"), 0U);
}
+
+template <class ELFT>
+static Expected<ELFObjectFile<ELFT>> toBinary(SmallVectorImpl<char> &Storage,
+ StringRef Yaml) {
+ raw_svector_ostream OS(Storage);
+ yaml::Input YIn(Yaml);
+ if (!yaml::convertYAML(YIn, OS, [](const Twine &Msg) {}))
+ return createStringError(std::errc::invalid_argument,
+ "unable to convert YAML");
+ return ELFObjectFile<ELFT>::create(MemoryBufferRef(OS.str(), "dummyELF"));
+}
+
+TEST(ELFObjectFileTest, ELFNoteIteratorOverflow) {
+ using Elf_Shdr_Range = ELFFile<ELF64LE>::Elf_Shdr_Range;
+ using Elf_Phdr_Range = ELFFile<ELF64LE>::Elf_Phdr_Range;
+
+ SmallString<0> Storage;
+ Expected<ELFObjectFile<ELF64LE>> ElfOrErr = toBinary<ELF64LE>(Storage, R"(
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_X86_64
+ProgramHeaders:
+ - Type: PT_NOTE
+ FileSize: 0xffffffffffffff88
+ FirstSec: .note.gnu.build-id
+ LastSec: .note.gnu.build-id
+Sections:
+ - Name: .note.gnu.build-id
+ Type: SHT_NOTE
+ AddressAlign: 0x04
+ ShOffset: 0xffffffffffffff88
+ Notes:
+ - Name: "GNU"
+ Desc: "abb50d82b6bdc861"
+ Type: 3
+)");
+ ASSERT_THAT_EXPECTED(ElfOrErr, Succeeded());
+ ELFFile<ELF64LE> Obj = ElfOrErr.get().getELFFile();
+
+ auto CheckOverflow = [&](auto &&PhdrOrShdr, uint64_t Offset, uint64_t Size) {
+ Error Err = Error::success();
+ Obj.notes(PhdrOrShdr, Err);
+
+ std::string ErrMessage;
+ handleAllErrors(std::move(Err), [&](const ErrorInfoBase &EI) {
+ ErrMessage = EI.message();
+ });
+
+ EXPECT_EQ(ErrMessage, ("invalid offset (0x" + Twine::utohexstr(Offset) +
+ ") or size (0x" + Twine::utohexstr(Size) + ")")
+ .str());
+ };
+
+ Expected<Elf_Phdr_Range> PhdrsOrErr = Obj.program_headers();
+ EXPECT_FALSE(!PhdrsOrErr);
+ for (Elf_Phdr_Impl<ELF64LE> P : *PhdrsOrErr)
+ if (P.p_type == ELF::PT_NOTE)
+ CheckOverflow(P, P.p_offset, P.p_filesz);
+
+ Expected<Elf_Shdr_Range> ShdrsOrErr = Obj.sections();
+ EXPECT_FALSE(!ShdrsOrErr);
+ for (Elf_Shdr_Impl<ELF64LE> S : *ShdrsOrErr)
+ if (S.sh_type == ELF::SHT_NOTE)
+ CheckOverflow(S, S.sh_offset, S.sh_size);
+}
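
The new ELF test drives notes() with p_offset/sh_offset of 0xffffffffffffff88, so offset + size wraps past UINT64_MAX; the expected "invalid offset ... or size" error shows the parser must reject the range before dereferencing it. A sketch of the overflow-safe bounds check the test expects:

#include <cstdint>

// Reject an (Offset, Size) file range whose end wraps around uint64_t or
// runs past the file, catching headers like FileSize: 0xffffffffffffff88.
bool isValidRange(uint64_t Offset, uint64_t Size, uint64_t FileSize) {
  uint64_t End = Offset + Size;
  if (End < Offset) // unsigned wraparound => invalid
    return false;
  return End <= FileSize;
}
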
diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp
index abe36bc..6ea951e 100644
--- a/llvm/unittests/ProfileData/MemProfTest.cpp
+++ b/llvm/unittests/ProfileData/MemProfTest.cpp
@@ -26,13 +26,14 @@
#include <initializer_list>
-LLVM_ABI extern llvm::cl::opt<float> MemProfLifetimeAccessDensityColdThreshold;
-LLVM_ABI extern llvm::cl::opt<unsigned> MemProfAveLifetimeColdThreshold;
-LLVM_ABI extern llvm::cl::opt<unsigned>
+namespace llvm {
+
+LLVM_ABI extern cl::opt<float> MemProfLifetimeAccessDensityColdThreshold;
+LLVM_ABI extern cl::opt<unsigned> MemProfAveLifetimeColdThreshold;
+LLVM_ABI extern cl::opt<unsigned>
MemProfMinAveLifetimeAccessDensityHotThreshold;
-LLVM_ABI extern llvm::cl::opt<bool> MemProfUseHotHints;
+LLVM_ABI extern cl::opt<bool> MemProfUseHotHints;
-namespace llvm {
namespace memprof {
namespace {
diff --git a/llvm/unittests/Support/Path.cpp b/llvm/unittests/Support/Path.cpp
index 888729b..eb649de 100644
--- a/llvm/unittests/Support/Path.cpp
+++ b/llvm/unittests/Support/Path.cpp
@@ -255,14 +255,14 @@ TEST(Support, Path) {
{
SmallString<32> Relative("foo.cpp");
- sys::fs::make_absolute("/root", Relative);
+ path::make_absolute("/root", Relative);
Relative[5] = '/'; // Fix up windows paths.
ASSERT_EQ("/root/foo.cpp", Relative);
}
{
SmallString<32> Relative("foo.cpp");
- sys::fs::make_absolute("//root", Relative);
+ path::make_absolute("//root", Relative);
Relative[6] = '/'; // Fix up windows paths.
ASSERT_EQ("//root/foo.cpp", Relative);
}
diff --git a/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn b/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn
index d4ec80b..c143acf 100644
--- a/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn
+++ b/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn
@@ -36,6 +36,7 @@ static_library("Core") {
"GDBIndex.cpp",
"HashUtilities.cpp",
"JumpTable.cpp",
+ "MCInstUtils.cpp",
"MCPlusBuilder.cpp",
"ParallelUtilities.cpp",
"Relocation.cpp",
diff --git a/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn b/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn
index b856d1c..764ebb9 100644
--- a/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn
+++ b/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn
@@ -28,6 +28,7 @@ static_library("Rewrite") {
"BuildIDRewriter.cpp",
"DWARFRewriter.cpp",
"ExecutableFileMemoryManager.cpp",
+ "GNUPropertyRewriter.cpp",
"JITLinkLinker.cpp",
"LinuxKernelRewriter.cpp",
"MachORewriteInstance.cpp",
diff --git a/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni b/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni
index ac48b94..2ab2a0e 100644
--- a/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni
+++ b/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni
@@ -526,6 +526,13 @@ if (current_cpu == "ve") {
]
}
+if (current_cpu == "wasm") {
+ builtins_sources += [
+ "wasm/__c_longjmp.S",
+ "wasm/__cpp_exceptions.S",
+ ]
+}
+
if (!compiler_rt_exclude_atomic_builtin) {
builtins_sources += [ "atomic.c" ]
}
diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
index 42a7940..f771099 100644
--- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
@@ -1496,6 +1496,7 @@ if (current_toolchain == default_toolchain) {
"__type_traits/is_floating_point.h",
"__type_traits/is_function.h",
"__type_traits/is_fundamental.h",
+ "__type_traits/is_generic_transparent_comparator.h",
"__type_traits/is_implicit_lifetime.h",
"__type_traits/is_implicitly_default_constructible.h",
"__type_traits/is_integral.h",
@@ -1538,6 +1539,7 @@ if (current_toolchain == default_toolchain) {
"__type_traits/make_32_64_or_128_bit.h",
"__type_traits/make_const_lvalue_ref.h",
"__type_traits/make_signed.h",
+ "__type_traits/make_transparent.h",
"__type_traits/make_unsigned.h",
"__type_traits/maybe_const.h",
"__type_traits/nat.h",
diff --git a/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn
index 2f692d7..c37f43c 100644
--- a/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn
@@ -4,9 +4,11 @@ static_library("CAS") {
"ActionCache.cpp",
"ActionCaches.cpp",
"BuiltinCAS.cpp",
+ "DatabaseFile.cpp",
"InMemoryCAS.cpp",
"MappedFileRegionArena.cpp",
"ObjectStore.cpp",
"OnDiskCommon.cpp",
+ "OnDiskTrieRawHashMap.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn
index de6de0b..ccb447f 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn
@@ -10,6 +10,7 @@ unittest("CASTests") {
"ActionCacheTest.cpp",
"CASTestConfig.cpp",
"ObjectStoreTest.cpp",
+ "OnDiskTrieRawHashMapTest.cpp",
"ProgramTest.cpp",
]
}
diff --git a/llvm/utils/profcheck-xfail.txt b/llvm/utils/profcheck-xfail.txt
index 08c8944..77e6ab7 100644
--- a/llvm/utils/profcheck-xfail.txt
+++ b/llvm/utils/profcheck-xfail.txt
@@ -1414,7 +1414,6 @@ Transforms/SimplifyCFG/merge-cond-stores.ll
Transforms/SimplifyCFG/multiple-phis.ll
Transforms/SimplifyCFG/PhiBlockMerge.ll
Transforms/SimplifyCFG/pr48641.ll
-Transforms/SimplifyCFG/preserve-branchweights.ll
Transforms/SimplifyCFG/preserve-store-alignment.ll
Transforms/SimplifyCFG/rangereduce.ll
Transforms/SimplifyCFG/RISCV/select-trunc-i64.ll
@@ -1424,7 +1423,6 @@ Transforms/SimplifyCFG/safe-abs.ll
Transforms/SimplifyCFG/SimplifyEqualityComparisonWithOnlyPredecessor-domtree-preservation-edgecase.ll
Transforms/SimplifyCFG/speculate-blocks.ll
Transforms/SimplifyCFG/speculate-derefable-load.ll
-Transforms/SimplifyCFG/suppress-zero-branch-weights.ll
Transforms/SimplifyCFG/switch_create-custom-dl.ll
Transforms/SimplifyCFG/switch_create.ll
Transforms/SimplifyCFG/switch-dup-bbs.ll
diff --git a/mlir/.clang-format b/mlir/.clang-format
index a74fda4..76cc928 100644
--- a/mlir/.clang-format
+++ b/mlir/.clang-format
@@ -1,2 +1,3 @@
BasedOnStyle: LLVM
AlwaysBreakTemplateDeclarations: Yes
+LineEnding: LF
diff --git a/mlir/include/mlir-c/IR.h b/mlir/include/mlir-c/IR.h
index 061d762..c464e4d 100644
--- a/mlir/include/mlir-c/IR.h
+++ b/mlir/include/mlir-c/IR.h
@@ -634,6 +634,10 @@ MLIR_CAPI_EXPORTED MlirContext mlirOperationGetContext(MlirOperation op);
/// Gets the location of the operation.
MLIR_CAPI_EXPORTED MlirLocation mlirOperationGetLocation(MlirOperation op);
+/// Sets the location of the operation.
+MLIR_CAPI_EXPORTED void mlirOperationSetLocation(MlirOperation op,
+ MlirLocation loc);
+
/// Gets the type id of the operation.
/// Returns null if the operation does not have a registered operation
/// description.
diff --git a/mlir/include/mlir/Bindings/Python/NanobindAdaptors.h b/mlir/include/mlir/Bindings/Python/NanobindAdaptors.h
index b5f985f..847951a 100644
--- a/mlir/include/mlir/Bindings/Python/NanobindAdaptors.h
+++ b/mlir/include/mlir/Bindings/Python/NanobindAdaptors.h
@@ -116,7 +116,8 @@ mlirApiObjectToCapsule(nanobind::handle apiObject) {
/// Casts object <-> MlirAffineMap.
template <>
struct type_caster<MlirAffineMap> {
- NB_TYPE_CASTER(MlirAffineMap, const_name("MlirAffineMap"))
+ NB_TYPE_CASTER(MlirAffineMap,
+ const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.AffineMap")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToAffineMap(capsule->ptr());
@@ -138,7 +139,8 @@ struct type_caster<MlirAffineMap> {
/// Casts object <-> MlirAttribute.
template <>
struct type_caster<MlirAttribute> {
- NB_TYPE_CASTER(MlirAttribute, const_name("MlirAttribute"))
+ NB_TYPE_CASTER(MlirAttribute,
+ const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Attribute")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToAttribute(capsule->ptr());
@@ -161,7 +163,7 @@ struct type_caster<MlirAttribute> {
/// Casts object -> MlirBlock.
template <>
struct type_caster<MlirBlock> {
- NB_TYPE_CASTER(MlirBlock, const_name("MlirBlock"))
+ NB_TYPE_CASTER(MlirBlock, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Block")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToBlock(capsule->ptr());
@@ -174,7 +176,8 @@ struct type_caster<MlirBlock> {
/// Casts object -> MlirContext.
template <>
struct type_caster<MlirContext> {
- NB_TYPE_CASTER(MlirContext, const_name("MlirContext"))
+ NB_TYPE_CASTER(MlirContext,
+ const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Context")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (src.is_none()) {
// Gets the current thread-bound context.
@@ -192,7 +195,8 @@ struct type_caster<MlirContext> {
/// Casts object <-> MlirDialectRegistry.
template <>
struct type_caster<MlirDialectRegistry> {
- NB_TYPE_CASTER(MlirDialectRegistry, const_name("MlirDialectRegistry"))
+ NB_TYPE_CASTER(MlirDialectRegistry,
+ const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.DialectRegistry")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToDialectRegistry(capsule->ptr());
@@ -214,7 +218,8 @@ struct type_caster<MlirDialectRegistry> {
/// Casts object <-> MlirLocation.
template <>
struct type_caster<MlirLocation> {
- NB_TYPE_CASTER(MlirLocation, const_name("MlirLocation"))
+ NB_TYPE_CASTER(MlirLocation,
+ const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Location")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (src.is_none()) {
// Gets the current thread-bound context.
@@ -240,7 +245,7 @@ struct type_caster<MlirLocation> {
/// Casts object <-> MlirModule.
template <>
struct type_caster<MlirModule> {
- NB_TYPE_CASTER(MlirModule, const_name("MlirModule"))
+ NB_TYPE_CASTER(MlirModule, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Module")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToModule(capsule->ptr());
@@ -262,8 +267,9 @@ struct type_caster<MlirModule> {
/// Casts object <-> MlirFrozenRewritePatternSet.
template <>
struct type_caster<MlirFrozenRewritePatternSet> {
- NB_TYPE_CASTER(MlirFrozenRewritePatternSet,
- const_name("MlirFrozenRewritePatternSet"))
+ NB_TYPE_CASTER(
+ MlirFrozenRewritePatternSet,
+ const_name(MAKE_MLIR_PYTHON_QUALNAME("rewrite.FrozenRewritePatternSet")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToFrozenRewritePatternSet(capsule->ptr());
@@ -285,7 +291,8 @@ struct type_caster<MlirFrozenRewritePatternSet> {
/// Casts object <-> MlirOperation.
template <>
struct type_caster<MlirOperation> {
- NB_TYPE_CASTER(MlirOperation, const_name("MlirOperation"))
+ NB_TYPE_CASTER(MlirOperation,
+ const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Operation")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToOperation(capsule->ptr());
@@ -309,7 +316,7 @@ struct type_caster<MlirOperation> {
/// Casts object <-> MlirValue.
template <>
struct type_caster<MlirValue> {
- NB_TYPE_CASTER(MlirValue, const_name("MlirValue"))
+ NB_TYPE_CASTER(MlirValue, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Value")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToValue(capsule->ptr());
@@ -334,7 +341,8 @@ struct type_caster<MlirValue> {
/// Casts object -> MlirPassManager.
template <>
struct type_caster<MlirPassManager> {
- NB_TYPE_CASTER(MlirPassManager, const_name("MlirPassManager"))
+ NB_TYPE_CASTER(MlirPassManager, const_name(MAKE_MLIR_PYTHON_QUALNAME(
+ "passmanager.PassManager")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToPassManager(capsule->ptr());
@@ -347,7 +355,7 @@ struct type_caster<MlirPassManager> {
/// Casts object <-> MlirTypeID.
template <>
struct type_caster<MlirTypeID> {
- NB_TYPE_CASTER(MlirTypeID, const_name("MlirTypeID"))
+ NB_TYPE_CASTER(MlirTypeID, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.TypeID")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToTypeID(capsule->ptr());
@@ -371,7 +379,7 @@ struct type_caster<MlirTypeID> {
/// Casts object <-> MlirType.
template <>
struct type_caster<MlirType> {
- NB_TYPE_CASTER(MlirType, const_name("MlirType"))
+ NB_TYPE_CASTER(MlirType, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Type")))
bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
if (auto capsule = mlirApiObjectToCapsule(src)) {
value = mlirPythonCapsuleToType(capsule->ptr());
@@ -394,7 +402,7 @@ struct type_caster<MlirType> {
/// Casts MlirStringRef -> object.
template <>
struct type_caster<MlirStringRef> {
- NB_TYPE_CASTER(MlirStringRef, const_name("MlirStringRef"))
+ NB_TYPE_CASTER(MlirStringRef, const_name("str"))
static handle from_cpp(MlirStringRef s, rv_policy,
cleanup_list *cleanup) noexcept {
return nanobind::str(s.data, s.length).release();
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 8b687a7..29001e2 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -985,7 +985,6 @@ class ScaleArgInfo<TypeConstraint argTyVal, string typeName> {
//===---------------------------------------------------------------------===//
// Scaled {fp4,bf8,fp8} to {bf16,f16,f32} conversion intrinsics
//===---------------------------------------------------------------------===//
-
foreach smallT = [
ScaleArgInfo<I32, "Fp4">,
ScaleArgInfo<ROCDL_V2I32Type, "Fp8">,
@@ -996,6 +995,8 @@ foreach smallT = [
ScaleArgInfo<ROCDL_V8BF16Type, "Bf16">,
ScaleArgInfo<ROCDL_V8F32Type, "F32">,
] in {
+
+ // Up-scaling
def ROCDL_CvtPkScalePk8 # largeT.nameForOp # smallT.nameForOp # Op :
ROCDL_ConcreteNonMemIntrOp<"cvt.scale.pk8." # largeT.name # "." # smallT.name,
[Pure], 1, [2], ["scaleSel"]>,
@@ -1010,13 +1011,30 @@ foreach smallT = [
attr-dict $src `,` $scale `[` $scaleSel `]` `:` type($res)
}];
}
+
+ // Down-scaling
+ def ROCDL_CvtScaleF32Pk8 # smallT.nameForOp # largeT.nameForOp # Op :
+ ROCDL_ConcreteNonMemIntrOp<"cvt.scalef32.pk8." # smallT.name # "." # largeT.name,
+ [Pure], 1>,
+ Arguments<(ins largeT.type:$src, F32:$scale)> {
+ let results = (outs smallT.type:$res);
+ let summary = "Scale and convert packed "
+ # largeT.name # " to packed " # smallT.name ;
+ let description = [{
+ Convert 8 packed }] # largeT.name # [{ values to packed }]
+ # smallT.name # [{, multiplying by the exponent part of `scale`
+      before doing so. This op is only available on gfx1250+ architectures.
+ }];
+ let assemblyFormat = [{
+ attr-dict $src `,` $scale `:` type($res)
+ }];
+ }
} // foreach largeT
} // foreach smallTOp
//===---------------------------------------------------------------------===//
// Scaled {bf6,fp6} to {bf16,f16,f32} conversion intrinsics
//===---------------------------------------------------------------------===//
-
foreach smallT = [
ScaleArgInfo<ROCDL_V3I32Type, "Fp6">,
ScaleArgInfo<ROCDL_V3I32Type, "Bf6">
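For orientation, here is a sketch of how one instance of the new down-scaling intrinsic might look in textual IR. The mnemonic suffixes, operand names, and types are assumptions derived from the `ScaleArgInfo` lists and the assembly format above:

```mlir
// Hypothetical: down-scale 8 packed f32 values (%src : vector<8xf32>) to
// packed fp8 (vector<2xi32>), multiplying by the exponent part of %scale.
%res = rocdl.cvt.scalef32.pk8.fp8.f32 %src, %scale : vector<2xi32>
```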
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 8f3232f..0d6ebc0 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -17,6 +17,7 @@ include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td"
include "mlir/Dialect/Transform/IR/TransformTypes.td"
include "mlir/Dialect/SCF/IR/DeviceMappingInterface.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
+include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/IR/OpBase.td"
include "mlir/IR/RegionKindInterface.td"
@@ -236,11 +237,51 @@ def BufferizeToAllocationOp : Op<Transform_Dialect,
Transform_AnyOpType:$new_ops);
let assemblyFormat = "$target attr-dict `:` type($target)";
let hasVerifier = 1;
+}
- let builders = [
- OpBuilder<(ins "Value":$target, "Attribute":$memorySpace)>,
- OpBuilder<(ins "Value":$target, "int64_t":$memorySpace)>
- ];
+//===----------------------------------------------------------------------===//
+// PromoteTensorOp
+//===----------------------------------------------------------------------===//
+
+def PromoteTensorOp : Op<Transform_Dialect, "structured.promote_tensor",
+ [DeclareOpInterfaceMethods<TransformOpInterface>,
+ DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+ SameOperandsAndResultType]> {
+ let summary = "Request a tensor value to live in a specific memory space "
+ "after bufferization";
+ let description = [{
+ Requests that a tensor value lives in a specific memory space for its
+ lifetime. This is achieved by allocating a new tensor in the desired
+ memory space with `bufferization.alloc_tensor` and optionally materializing
+ the source value into that allocation with
+ `bufferization.materialize_in_destination`. All uses of the original value
+ are then redirected to the promoted value.
+
+ The generated code for promoting tensor value %0 resembles the following:
+
+ %1 = bufferization.alloc_tensor(<dynamic dims of %0>)
+ { memory_space = memory_space }
+ // Note: the materialization is omitted if %0 is never read and is only
+ // written into (i.e., it behaves as a result tensor).
+ %2 = bufferization.materialize_in_destination %0 in %1
+ // ...
+ <all users of %0 now use %2 instead>
+
+ Deallocation is not handled by this transform.
+
+ Return modes:
+ - Produces a silenceable failure if the given handle does not point to
+ tensor-typed values.
+ - Succeeds otherwise and returns a handle to the promoted value(s), i.e.,
+ the result of materialization if present and the allocation otherwise.
+ }];
+
+ let arguments = (ins TransformValueHandleTypeInterface:$tensor,
+ OptionalAttr<AnyAttr>:$memory_space);
+ let results = (outs TransformValueHandleTypeInterface:$promoted);
+
+ let assemblyFormat =
+ "(`to` $memory_space^)? $tensor attr-dict `:` type($tensor)";
}
//===----------------------------------------------------------------------===//
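Based on the assembly format above, a usage sketch (the handle names, handle type, and memory space attribute are illustrative assumptions):

```mlir
// Promote the tensor value(s) behind %t to memory space 1; %p is a handle
// to the promoted value, i.e. the materialization result if one was needed,
// and the bare allocation otherwise.
%p = transform.structured.promote_tensor to 1 %t : !transform.any_value
```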
diff --git a/mlir/include/mlir/Dialect/Math/Transforms/Passes.td b/mlir/include/mlir/Dialect/Math/Transforms/Passes.td
index 4d415ae..48346abd 100644
--- a/mlir/include/mlir/Dialect/Math/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Math/Transforms/Passes.td
@@ -64,4 +64,12 @@ def MathExpandOpsPass : Pass<"math-expand-ops"> {
];
}
+def MathSincosFusionPass : Pass<"math-sincos-fusion"> {
+ let summary = "Fuse sin and cos operations.";
+ let description = [{
+    Fuse a sin and a cos operation that share an operand and fastmath flags
+    into a single sincos operation.
+ }];
+ let dependentDialects = ["math::MathDialect"];
+}
+
#endif // MLIR_DIALECT_MATH_TRANSFORMS_PASSES
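As a before/after sketch of what the pass is intended to do (the textual form of `math.sincos` shown here is an assumption):

```mlir
// Before: sin and cos of the same operand with matching fastmath flags.
%s = math.sin %x : f32
%c = math.cos %x : f32
// After math-sincos-fusion: a single fused op producing both results.
%s2, %c2 = math.sincos %x : f32
```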
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
index 2bf953e..d4d67bf 100644
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -155,7 +155,7 @@ def AssumeAlignmentOp : MemRef_Op<"assume_alignment", [
The `assume_alignment` operation takes a memref and an integer alignment
value. It returns a new SSA value of the same memref type, but associated
with the assumption that the underlying buffer is aligned to the given
- alignment.
+ alignment.
If the buffer isn't aligned to the given alignment, its result is poison.
This operation doesn't affect the semantics of a program where the
@@ -170,7 +170,7 @@ def AssumeAlignmentOp : MemRef_Op<"assume_alignment", [
let assemblyFormat = "$memref `,` $alignment attr-dict `:` type($memref)";
let extraClassDeclaration = [{
MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
-
+
Value getViewSource() { return getMemref(); }
}];
@@ -179,6 +179,41 @@ def AssumeAlignmentOp : MemRef_Op<"assume_alignment", [
}
//===----------------------------------------------------------------------===//
+// DistinctObjectsOp
+//===----------------------------------------------------------------------===//
+
+def DistinctObjectsOp : MemRef_Op<"distinct_objects", [
+ Pure,
+ DeclareOpInterfaceMethods<InferTypeOpInterface>
+ // ViewLikeOpInterface TODO: ViewLikeOpInterface only supports a single argument
+ ]> {
+  let summary = "assumption that accesses to specific memrefs will never alias";
+ let description = [{
+ The `distinct_objects` operation takes a list of memrefs and returns the same
+ memrefs, with the additional assumption that accesses to them will never
+ alias with each other. This means that loads and stores to different
+ memrefs in the list can be safely reordered.
+
+ If the memrefs do alias, the load/store behavior is undefined. This
+ operation doesn't affect the semantics of a valid program. It is
+ intended for optimization purposes, allowing the compiler to generate more
+ efficient code based on the non-aliasing assumption. The optimization is
+ best-effort.
+
+ Example:
+
+ ```mlir
+ %1, %2 = memref.distinct_objects %a, %b : memref<?xf32>, memref<?xf32>
+ ```
+ }];
+ let arguments = (ins Variadic<AnyMemRef>:$operands);
+ let results = (outs Variadic<AnyMemRef>:$results);
+
+ let assemblyFormat = "$operands attr-dict `:` type($operands)";
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
// AllocOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td
index 1eda5e4..8e43c42 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td
@@ -996,6 +996,35 @@ class OpenMP_NumTeamsClauseSkip<
def OpenMP_NumTeamsClause : OpenMP_NumTeamsClauseSkip<>;
//===----------------------------------------------------------------------===//
+// V5.1: [10.1.2] `sizes` clause
+//===----------------------------------------------------------------------===//
+
+class OpenMP_SizesClauseSkip<
+ bit traits = false, bit arguments = false, bit assemblyFormat = false,
+ bit description = false, bit extraClassDeclaration = false
+ > : OpenMP_Clause<traits, arguments, assemblyFormat, description,
+ extraClassDeclaration> {
+ let arguments = (ins
+ Variadic<IntLikeType>:$sizes
+ );
+
+ let optAssemblyFormat = [{
+ `sizes` `(` $sizes `:` type($sizes) `)`
+ }];
+
+ let description = [{
+    The `sizes` clause defines the size of a grid over a multi-dimensional
+    logical iteration space. This grid is used for loop transformations such as
+    `tile` and `strip`. The size per dimension can be a variable, but only
+    values of at least 2 make sense. It is not specified what happens when
+    smaller values are used, but they should still result in a loop nest that
+    executes each logical iteration once.
+ }];
+}
+
+def OpenMP_SizesClause : OpenMP_SizesClauseSkip<>;
+
+//===----------------------------------------------------------------------===//
// V5.2: [10.1.2] `num_threads` clause
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td
index bbcfb87f..5ad4e4b 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td
@@ -38,6 +38,44 @@ def OpenMP_MapBoundsType : OpenMP_Type<"MapBounds", "map_bounds_ty"> {
let summary = "Type for representing omp map clause bounds information";
}
+//===---------------------------------------------------------------------===//
+// OpenMP Canonical Loop Info Type
+//===---------------------------------------------------------------------===//
+
+def CanonicalLoopInfoType : OpenMP_Type<"CanonicalLoopInfo", "cli"> {
+ let summary = "Type for representing a reference to a canonical loop";
+ let description = [{
+ A variable of type CanonicalLoopInfo refers to an OpenMP-compatible
+ canonical loop in the same function. Values of this type are not
+    canonical loop in the same function. Values of this type are not
+    available at runtime and therefore cannot be used by the program itself,
+    i.e. it is an opaque type. It is similar to the transform dialect's
+ for each transformation, the OpenMP dialect itself defines possible
+ operations on this type.
+
+    A value of type CanonicalLoopInfoType (in the following: CLI) can be
+
+ 1. created by omp.new_cli.
+    2. passed to omp.canonical_loop to associate the loop with that CLI. A CLI
+ can only be associated once.
+ 3. passed to an omp loop transformation operation that modifies the loop
+ associated with the CLI. The CLI is the "applyee" and the operation is
+ the consumer. A CLI can only be consumed once.
+    4. passed to an omp loop transformation operation to associate the CLI with
+ a result of that transformation. The CLI is the "generatee" and the
+ operation is the generator.
+
+ A CLI cannot
+
+ 1. be returned from a function.
+ 2. be passed to operations that are not specifically designed to take a
+ CanonicalLoopInfoType, including AnyType.
+
+ A CLI directly corresponds to an object of
+ OpenMPIRBuilder's CanonicalLoopInfo struct when lowering to LLVM-IR.
+ }];
+}
+
//===----------------------------------------------------------------------===//
// Base classes for OpenMP dialect operations.
//===----------------------------------------------------------------------===//
@@ -211,8 +249,35 @@ class OpenMP_Op<string mnemonic, list<Trait> traits = [],
// Doesn't actually create a C++ base class (only defines default values for
// tablegen classes that derive from this). Use LoopTransformationInterface
// instead for common operations.
-class OpenMPTransform_Op<string mnemonic, list<Trait> traits = []> :
- OpenMP_Op<mnemonic, !listconcat([DeclareOpInterfaceMethods<LoopTransformationInterface>], traits) > {
+class OpenMPTransform_Op<string mnemonic,
+ list<Trait> traits = [],
+ list<OpenMP_Clause> clauses = []> :
+ OpenMP_Op<mnemonic,
+ traits = !listconcat([DeclareOpInterfaceMethods<LoopTransformationInterface>], traits),
+ clauses = clauses> {
+}
+
+// Base class for loop transformations using the standard syntax.
+//
+// omp.opname ($generatees) <- ($applyees) clause(...) clause(...) ... <attr-dict>
+// omp.opname ($applyees) clause(...) clause(...) ... <attr-dict>
+//
+// $generatees is optional and is assumed to be empty if omitted.
+class OpenMPTransformBase_Op<string mnemonic,
+ list<Trait> traits = [],
+ list<OpenMP_Clause> clauses = []> :
+ OpenMPTransform_Op<mnemonic,
+ traits = !listconcat(traits, [AttrSizedOperandSegments]),
+ clauses = clauses> {
+
+ let arguments = !con(
+ (ins Variadic<CanonicalLoopInfoType>:$generatees,
+ Variadic<CanonicalLoopInfoType>:$applyees
+ ), clausesArgs);
+
+ let assemblyFormat = [{ custom<LoopTransformClis>($generatees, $applyees) }]
+ # clausesAssemblyFormat
+ # [{ attr-dict }];
}
#endif // OPENMP_OP_BASE
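A minimal sketch of the CLI life cycle described above; the exact `omp.canonical_loop` assembly is an assumption:

```mlir
// Create a CLI, then associate it with a canonical loop of %tc iterations.
%cli = omp.new_cli
omp.canonical_loop(%cli) %iv : i32 in range(%tc) {
  // loop body
  omp.terminator
}
```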
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 5c77e21..b73091e 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -358,44 +358,6 @@ def SingleOp : OpenMP_Op<"single", traits = [
}
//===---------------------------------------------------------------------===//
-// OpenMP Canonical Loop Info Type
-//===---------------------------------------------------------------------===//
-
-def CanonicalLoopInfoType : OpenMP_Type<"CanonicalLoopInfo", "cli"> {
- let summary = "Type for representing a reference to a canonical loop";
- let description = [{
- A variable of type CanonicalLoopInfo refers to an OpenMP-compatible
- canonical loop in the same function. Values of this type are not
- available at runtime and therefore cannot be used by the program itself,
- i.e. an opaque type. It is similar to the transform dialect's
- `!transform.interface` type, but instead of implementing an interface
- for each transformation, the OpenMP dialect itself defines possible
- operations on this type.
-
- A value of type CanonicalLoopInfoType (in the following: CLI) value can be
-
- 1. created by omp.new_cli.
- 2. passed to omp.canonical_loop to associate the loop to that CLI. A CLI
- can only be associated once.
- 3. passed to an omp loop transformation operation that modifies the loop
- associated with the CLI. The CLI is the "applyee" and the operation is
- the consumer. A CLI can only be consumed once.
- 4. passed to an omp loop transformation operation to associate the cli with
- a result of that transformation. The CLI is the "generatee" and the
- operation is the generator.
-
- A CLI cannot
-
- 1. be returned from a function.
- 2. be passed to operations that are not specifically designed to take a
- CanonicalLoopInfoType, including AnyType.
-
- A CLI directly corresponds to an object of
- OpenMPIRBuilder's CanonicalLoopInfo struct when lowering to LLVM-IR.
- }];
-}
-
-//===---------------------------------------------------------------------===//
// OpenMP Canonical Loop Info Creation
//===---------------------------------------------------------------------===//
@@ -564,6 +526,31 @@ def UnrollHeuristicOp : OpenMPTransform_Op<"unroll_heuristic", []> {
}
//===----------------------------------------------------------------------===//
+// OpenMP tile operation
+//===----------------------------------------------------------------------===//
+
+def TileOp : OpenMPTransformBase_Op<"tile",
+ clauses = [OpenMP_SizesClause]> {
+ let summary = "OpenMP tile operation";
+ let description = [{
+ Represents the OpenMP tile directive introduced in OpenMP 5.1.
+
+ The construct partitions the logical iteration space of the affected loops
+ into equally-sized tiles, then creates two sets of nested loops. The outer
+ loops, called the grid loops, iterate over all tiles. The inner loops,
+ called the intratile loops, iterate over the logical iterations of a tile.
+ The sizes clause determines the size of a tile.
+
+    Currently, the affected loops must be rectangular (the trip count of an
+    inner loop must not depend on any induction variable of a surrounding
+    affected loop) and perfectly nested (except for the innermost affected loop,
+    the loop body may contain no operations other than the nested loop and the
+    terminator).
+ }] # clausesDescription;
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
// 2.8.3 Workshare Construct
//===----------------------------------------------------------------------===//
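Following the generatees/applyees grammar documented on `OpenMPTransformBase_Op`, tiling a two-loop nest might be written as below; the SSA names are illustrative and every handle is a CLI:

```mlir
// Tile the nest (%outer, %inner) with tile sizes %ts1 x %ts2; the four
// generatees are bound to the two grid loops and the two intratile loops.
omp.tile (%grid1, %grid2, %intra1, %intra2) <- (%outer, %inner)
    sizes(%ts1, %ts2 : i32, i32)
```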
diff --git a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h
index 74e1d28..ba11259 100644
--- a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h
+++ b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h
@@ -9,6 +9,7 @@
#ifndef MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS_H
#define MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS_H
+#include "mlir/Dialect/Transform/IR/TransformOps.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/OpDefinition.h"
diff --git a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td
index d68d451..d095659 100644
--- a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td
+++ b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td
@@ -11,10 +11,15 @@
include "mlir/Dialect/Transform/IR/TransformDialect.td"
include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td"
+include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/BuiltinAttributes.td"
include "mlir/IR/CommonAttrConstraints.td"
+//===----------------------------------------------------------------------===//
+// KnobOp
+//===----------------------------------------------------------------------===//
+
def KnobOp : Op<Transform_Dialect, "tune.knob", [
DeclareOpInterfaceMethods<TransformOpInterface>,
DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
@@ -52,4 +57,53 @@ def KnobOp : Op<Transform_Dialect, "tune.knob", [
"`<` $name `>` (`=` $selected^ `from`)? `options` `=` $options attr-dict `->` type(results)";
}
+//===----------------------------------------------------------------------===//
+// AlternativesOp
+//===----------------------------------------------------------------------===//
+
+def AlternativesOp : Op<Transform_Dialect, "tune.alternatives", [
+ DeclareOpInterfaceMethods<RegionBranchOpInterface,
+ ["getEntrySuccessorOperands", "getSuccessorRegions",
+ "getRegionInvocationBounds"]>,
+ DeclareOpInterfaceMethods<TransformOpInterface>,
+ DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+ SingleBlockImplicitTerminator<"::mlir::transform::YieldOp">,
+ NoRegionArguments
+]> {
+ let summary = "Represents a choice among its regions, i.e. sub-schedules";
+
+ let description = [{
+ This op represents a choice over which of its regions is to be used.
+
+    When `selected_region` is provided, the semantics are that this op is to be
+    replaced by the selected region, meaning the region's results become
+    the results of this op. Without a provided `selected_region`, the semantics
+    are that this non-deterministic choice is yet to be resolved -- which, in
+    terms of the op's interpreted semantics, is a failure.
+
+ The `selected_region` argument is either an `IntegerAttr` or a param holding
+ an `IntegerAttr`, which should provide a valid zero-based index with respect
+ to the number of alternatives, i.e. regions.
+ }];
+ let cppNamespace = [{ mlir::transform::tune }];
+
+ let arguments = (ins Builtin_StringAttr:$name,
+ OptionalAttr<APIntAttr>:$selected_region_attr,
+ Optional<TransformParamTypeInterface>:$selected_region_param);
+ let results = (outs Variadic<Transform_AnyHandleOrParamType>:$results);
+ let regions = (region VariadicRegion<SizedRegion<1>>:$alternatives);
+
+ let assemblyFormat = [{
+ `<` $name `>`
+ (`selected_region` `=` custom<AlternativesOpSelectedRegion>(
+ $selected_region_attr, $selected_region_param)^)?
+ attr-dict-with-keyword
+ (`:` type($selected_region_param)^)?
+ (`->` type($results)^)?
+ regions
+ }];
+
+ let hasVerifier = 1;
+}
+
#endif // MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS
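A sketch of the op in a transform script, assuming comma-separated regions as printed by the `regions` directive (names are illustrative):

```mlir
// Resolved choice: region 0 was selected, so its yielded values become the
// results of the op; without selected_region, interpretation fails.
transform.tune.alternatives<"strategy"> selected_region = 0 {
  // sub-schedule 0
  transform.yield
}, {
  // sub-schedule 1
  transform.yield
}
```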
diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp
index 83a8757..32b2b0c 100644
--- a/mlir/lib/Bindings/Python/IRCore.cpp
+++ b/mlir/lib/Bindings/Python/IRCore.cpp
@@ -3219,13 +3219,11 @@ void mlir::python::populateIRCore(nb::module_ &m) {
nb::arg("end_line"), nb::arg("end_col"),
nb::arg("context") = nb::none(), kContextGetFileRangeDocstring)
.def("is_a_file", mlirLocationIsAFileLineColRange)
- .def_prop_ro(
- "filename",
- [](MlirLocation loc) {
- return mlirIdentifierStr(
- mlirLocationFileLineColRangeGetFilename(loc));
- },
- nb::sig("def filename(self) -> str"))
+ .def_prop_ro("filename",
+ [](MlirLocation loc) {
+ return mlirIdentifierStr(
+ mlirLocationFileLineColRangeGetFilename(loc));
+ })
.def_prop_ro("start_line", mlirLocationFileLineColRangeGetStartLine)
.def_prop_ro("start_col", mlirLocationFileLineColRangeGetStartColumn)
.def_prop_ro("end_line", mlirLocationFileLineColRangeGetEndLine)
@@ -3274,12 +3272,10 @@ void mlir::python::populateIRCore(nb::module_ &m) {
nb::arg("name"), nb::arg("childLoc") = nb::none(),
nb::arg("context") = nb::none(), kContextGetNameLocationDocString)
.def("is_a_name", mlirLocationIsAName)
- .def_prop_ro(
- "name_str",
- [](MlirLocation loc) {
- return mlirIdentifierStr(mlirLocationNameGetName(loc));
- },
- nb::sig("def name_str(self) -> str"))
+ .def_prop_ro("name_str",
+ [](MlirLocation loc) {
+ return mlirIdentifierStr(mlirLocationNameGetName(loc));
+ })
.def_prop_ro("child_loc",
[](PyLocation &self) {
return PyLocation(self.getContext(),
@@ -3453,15 +3449,13 @@ void mlir::python::populateIRCore(nb::module_ &m) {
return concreteOperation.getContext().getObject();
},
"Context that owns the Operation")
- .def_prop_ro(
- "name",
- [](PyOperationBase &self) {
- auto &concreteOperation = self.getOperation();
- concreteOperation.checkValid();
- MlirOperation operation = concreteOperation.get();
- return mlirIdentifierStr(mlirOperationGetName(operation));
- },
- nb::sig("def name(self) -> str"))
+ .def_prop_ro("name",
+ [](PyOperationBase &self) {
+ auto &concreteOperation = self.getOperation();
+ concreteOperation.checkValid();
+ MlirOperation operation = concreteOperation.get();
+ return mlirIdentifierStr(mlirOperationGetName(operation));
+ })
.def_prop_ro("operands",
[](PyOperationBase &self) {
return PyOpOperandList(self.getOperation().getRef());
@@ -3485,15 +3479,21 @@ void mlir::python::populateIRCore(nb::module_ &m) {
},
"Shortcut to get an op result if it has only one (throws an error "
"otherwise).")
- .def_prop_ro(
+ .def_prop_rw(
"location",
[](PyOperationBase &self) {
PyOperation &operation = self.getOperation();
return PyLocation(operation.getContext(),
mlirOperationGetLocation(operation.get()));
},
- "Returns the source location the operation was defined or derived "
- "from.")
+ [](PyOperationBase &self, const PyLocation &location) {
+ PyOperation &operation = self.getOperation();
+ mlirOperationSetLocation(operation.get(), location.get());
+ },
+ nb::for_getter("Returns the source location the operation was "
+ "defined or derived from."),
+ nb::for_setter("Sets the source location the operation was defined "
+ "or derived from."))
.def_prop_ro("parent",
[](PyOperationBase &self)
-> std::optional<nb::typed<nb::object, PyOperation>> {
@@ -3597,12 +3597,11 @@ void mlir::python::populateIRCore(nb::module_ &m) {
},
"Reports if the operation is attached to its parent block.")
.def("erase", [](PyOperationBase &self) { self.getOperation().erase(); })
- .def(
- "walk", &PyOperationBase::walk, nb::arg("callback"),
- nb::arg("walk_order") = MlirWalkPostOrder,
- // clang-format off
- nb::sig("def walk(self, callback: Callable[[Operation], WalkResult], walk_order: WalkOrder = " MAKE_MLIR_PYTHON_QUALNAME("ir.WalkOrder.POST_ORDER") ") -> None")
- // clang-format on
+ .def("walk", &PyOperationBase::walk, nb::arg("callback"),
+ nb::arg("walk_order") = MlirWalkPostOrder,
+ // clang-format off
+ nb::sig("def walk(self, callback: Callable[[Operation], WalkResult], walk_order: WalkOrder) -> None")
+ // clang-format on
);
nb::class_<PyOperation, PyOperationBase>(m, "Operation")
@@ -4118,7 +4117,6 @@ void mlir::python::populateIRCore(nb::module_ &m) {
[](PyNamedAttribute &self) {
return mlirIdentifierStr(self.namedAttr.name);
},
- nb::sig("def name(self) -> str"),
"The name of the NamedAttribute binding")
.def_prop_ro(
"attr",
@@ -4336,17 +4334,15 @@ void mlir::python::populateIRCore(nb::module_ &m) {
kValueReplaceAllUsesWithDocstring)
.def(
"replace_all_uses_except",
- [](MlirValue self, MlirValue with, PyOperation &exception) {
+ [](PyValue &self, PyValue &with, PyOperation &exception) {
MlirOperation exceptedUser = exception.get();
mlirValueReplaceAllUsesExcept(self, with, 1, &exceptedUser);
},
nb::arg("with_"), nb::arg("exceptions"),
- nb::sig("def replace_all_uses_except(self, with_: Value, exceptions: "
- "Operation) -> None"),
kValueReplaceAllUsesExceptDocstring)
.def(
"replace_all_uses_except",
- [](MlirValue self, MlirValue with, nb::list exceptions) {
+ [](PyValue &self, PyValue &with, const nb::list &exceptions) {
// Convert Python list to a SmallVector of MlirOperations
llvm::SmallVector<MlirOperation> exceptionOps;
for (nb::handle exception : exceptions) {
@@ -4358,8 +4354,6 @@ void mlir::python::populateIRCore(nb::module_ &m) {
exceptionOps.data());
},
nb::arg("with_"), nb::arg("exceptions"),
- nb::sig("def replace_all_uses_except(self, with_: Value, exceptions: "
- "Sequence[Operation]) -> None"),
kValueReplaceAllUsesExceptDocstring)
.def(
"replace_all_uses_except",
diff --git a/mlir/lib/Bindings/Python/IRModule.h b/mlir/lib/Bindings/Python/IRModule.h
index 598ae01..edbd73e 100644
--- a/mlir/lib/Bindings/Python/IRModule.h
+++ b/mlir/lib/Bindings/Python/IRModule.h
@@ -273,8 +273,7 @@ class DefaultingPyMlirContext
: public Defaulting<DefaultingPyMlirContext, PyMlirContext> {
public:
using Defaulting::Defaulting;
- static constexpr const char kTypeDescription[] =
- MAKE_MLIR_PYTHON_QUALNAME("ir.Context");
+ static constexpr const char kTypeDescription[] = "Context";
static PyMlirContext &resolve();
};
@@ -500,8 +499,7 @@ class DefaultingPyLocation
: public Defaulting<DefaultingPyLocation, PyLocation> {
public:
using Defaulting::Defaulting;
- static constexpr const char kTypeDescription[] =
- MAKE_MLIR_PYTHON_QUALNAME("ir.Location");
+ static constexpr const char kTypeDescription[] = "Location";
static PyLocation &resolve();
operator MlirLocation() const { return *get(); }
diff --git a/mlir/lib/Bindings/Python/IRTypes.cpp b/mlir/lib/Bindings/Python/IRTypes.cpp
index 3488d92..34c5b8d 100644
--- a/mlir/lib/Bindings/Python/IRTypes.cpp
+++ b/mlir/lib/Bindings/Python/IRTypes.cpp
@@ -1010,7 +1010,7 @@ public:
},
nb::arg("elements"), nb::arg("context") = nb::none(),
// clang-format off
- nb::sig("def get_tuple(elements: Sequence[Type], context: mlir.ir.Context | None = None) -> TupleType"),
+ nb::sig("def get_tuple(elements: Sequence[Type], context: Context | None = None) -> TupleType"),
// clang-format on
"Create a tuple type");
c.def(
@@ -1070,7 +1070,7 @@ public:
},
nb::arg("inputs"), nb::arg("results"), nb::arg("context") = nb::none(),
// clang-format off
- nb::sig("def get(inputs: Sequence[Type], results: Sequence[Type], context: mlir.ir.Context | None = None) -> FunctionType"),
+ nb::sig("def get(inputs: Sequence[Type], results: Sequence[Type], context: Context | None = None) -> FunctionType"),
// clang-format on
"Gets a FunctionType from a list of input and result types");
c.def_prop_ro(
diff --git a/mlir/lib/Bindings/Python/MainModule.cpp b/mlir/lib/Bindings/Python/MainModule.cpp
index 52656138..a14f09f 100644
--- a/mlir/lib/Bindings/Python/MainModule.cpp
+++ b/mlir/lib/Bindings/Python/MainModule.cpp
@@ -115,9 +115,6 @@ NB_MODULE(_mlir, m) {
});
},
"typeid"_a, nb::kw_only(), "replace"_a = false,
- // clang-format off
- nb::sig("def register_type_caster(typeid: " MAKE_MLIR_PYTHON_QUALNAME("ir.TypeID") ", *, replace: bool = False) -> object"),
- // clang-format on
"Register a type caster for casting MLIR types to custom user types.");
m.def(
MLIR_PYTHON_CAPI_VALUE_CASTER_REGISTER_ATTR,
@@ -130,9 +127,6 @@ NB_MODULE(_mlir, m) {
});
},
"typeid"_a, nb::kw_only(), "replace"_a = false,
- // clang-format off
- nb::sig("def register_value_caster(typeid: " MAKE_MLIR_PYTHON_QUALNAME("ir.TypeID") ", *, replace: bool = False) -> object"),
- // clang-format on
"Register a value caster for casting MLIR values to custom user values.");
// Define and populate IR submodule.
diff --git a/mlir/lib/Bindings/Python/Rewrite.cpp b/mlir/lib/Bindings/Python/Rewrite.cpp
index f18298e..836f44fd 100644
--- a/mlir/lib/Bindings/Python/Rewrite.cpp
+++ b/mlir/lib/Bindings/Python/Rewrite.cpp
@@ -127,7 +127,7 @@ public:
mlirPythonFrozenRewritePatternSetToCapsule(get()));
}
- static nb::object createFromCapsule(nb::object capsule) {
+ static nb::object createFromCapsule(const nb::object &capsule) {
MlirFrozenRewritePatternSet rawPm =
mlirPythonCapsuleToFrozenRewritePatternSet(capsule.ptr());
if (rawPm.ptr == nullptr)
diff --git a/mlir/lib/CAPI/IR/IR.cpp b/mlir/lib/CAPI/IR/IR.cpp
index e9844a7..1881865 100644
--- a/mlir/lib/CAPI/IR/IR.cpp
+++ b/mlir/lib/CAPI/IR/IR.cpp
@@ -656,6 +656,10 @@ MlirLocation mlirOperationGetLocation(MlirOperation op) {
return wrap(unwrap(op)->getLoc());
}
+void mlirOperationSetLocation(MlirOperation op, MlirLocation loc) {
+ unwrap(op)->setLoc(unwrap(loc));
+}
+
MlirTypeID mlirOperationGetTypeID(MlirOperation op) {
if (auto info = unwrap(op)->getRegisteredInfo())
return wrap(info->getTypeID());
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index cc6314c..a6f816a 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -465,6 +465,51 @@ struct AssumeAlignmentOpLowering
}
};
+struct DistinctObjectsOpLowering
+ : public ConvertOpToLLVMPattern<memref::DistinctObjectsOp> {
+ using ConvertOpToLLVMPattern<
+ memref::DistinctObjectsOp>::ConvertOpToLLVMPattern;
+ explicit DistinctObjectsOpLowering(const LLVMTypeConverter &converter)
+ : ConvertOpToLLVMPattern<memref::DistinctObjectsOp>(converter) {}
+
+ LogicalResult
+ matchAndRewrite(memref::DistinctObjectsOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ ValueRange operands = adaptor.getOperands();
+ if (operands.size() <= 1) {
+ // Fast path.
+ rewriter.replaceOp(op, operands);
+ return success();
+ }
+
+ Location loc = op.getLoc();
+ SmallVector<Value> ptrs;
+ for (auto [origOperand, newOperand] :
+ llvm::zip_equal(op.getOperands(), operands)) {
+ auto memrefType = cast<MemRefType>(origOperand.getType());
+ MemRefDescriptor memRefDescriptor(newOperand);
+ Value ptr = memRefDescriptor.bufferPtr(rewriter, loc, *getTypeConverter(),
+ memrefType);
+ ptrs.push_back(ptr);
+ }
+
+ auto cond =
+ LLVM::ConstantOp::create(rewriter, loc, rewriter.getI1Type(), 1);
+ // Generate separate_storage assumptions for each pair of pointers.
+ for (auto i : llvm::seq<size_t>(ptrs.size() - 1)) {
+ for (auto j : llvm::seq<size_t>(i + 1, ptrs.size())) {
+ Value ptr1 = ptrs[i];
+ Value ptr2 = ptrs[j];
+ LLVM::AssumeOp::create(rewriter, loc, cond,
+ LLVM::AssumeSeparateStorageTag{}, ptr1, ptr2);
+ }
+ }
+
+ rewriter.replaceOp(op, operands);
+ return success();
+ }
+};
+
// A `dealloc` is converted into a call to `free` on the underlying data buffer.
// The memref descriptor being an SSA value, there is no need to clean it up
// in any way.
@@ -1997,22 +2042,23 @@ void mlir::populateFinalizeMemRefToLLVMConversionPatterns(
patterns.add<
AllocaOpLowering,
AllocaScopeOpLowering,
- AtomicRMWOpLowering,
AssumeAlignmentOpLowering,
+ AtomicRMWOpLowering,
ConvertExtractAlignedPointerAsIndex,
DimOpLowering,
+ DistinctObjectsOpLowering,
ExtractStridedMetadataOpLowering,
GenericAtomicRMWOpLowering,
GetGlobalMemrefOpLowering,
LoadOpLowering,
MemRefCastOpLowering,
- MemorySpaceCastOpLowering,
MemRefReinterpretCastOpLowering,
MemRefReshapeOpLowering,
+ MemorySpaceCastOpLowering,
PrefetchOpLowering,
RankOpLowering,
- ReassociatingReshapeOpConversion<memref::ExpandShapeOp>,
ReassociatingReshapeOpConversion<memref::CollapseShapeOp>,
+ ReassociatingReshapeOpConversion<memref::ExpandShapeOp>,
StoreOpLowering,
SubViewOpLowering,
TransposeOpLowering,
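The net effect of `DistinctObjectsOpLowering` is a pairwise chain of `llvm.intr.assume` ops carrying separate_storage operand bundles; roughly, for two lowered memrefs (the textual bundle syntax here is an assumption):

```mlir
// %p0 and %p1 are the extracted base pointers of the two memref descriptors.
%true = llvm.mlir.constant(true) : i1
llvm.intr.assume %true ["separate_storage"(%p0, %p1 : !llvm.ptr, !llvm.ptr)] : i1
```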
diff --git a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp
index f3e065a..9821a75 100644
--- a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp
@@ -246,6 +246,6 @@ void SimplifyAffineMinMaxPass::runOnOperation() {
patterns.add<SimplifyAffineMaxOp, SimplifyAffineMinOp, SimplifyAffineApplyOp>(
func.getContext());
FrozenRewritePatternSet frozenPatterns(std::move(patterns));
- if (failed(applyPatternsGreedily(func, std::move(frozenPatterns))))
+ if (failed(applyPatternsGreedily(func, frozenPatterns)))
return signalPassFailure();
}
diff --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
index 7cfd6d3..898d76c 100644
--- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
+++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
@@ -1282,6 +1282,13 @@ OpFoldResult arith::MulFOp::fold(FoldAdaptor adaptor) {
if (matchPattern(adaptor.getRhs(), m_OneFloat()))
return getLhs();
+ if (arith::bitEnumContainsAll(getFastmath(), arith::FastMathFlags::nnan |
+ arith::FastMathFlags::nsz)) {
+ // mulf(x, 0) -> 0
+ if (matchPattern(adaptor.getRhs(), m_AnyZeroFloat()))
+ return getRhs();
+ }
+
return constFoldBinaryOp<FloatAttr>(
adaptor.getOperands(),
[](const APFloat &a, const APFloat &b) { return a * b; });
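In IR terms, the new fold only fires when both `nnan` and `nsz` are present; a sketch:

```mlir
// Folds to %zero under fastmath<nnan,nsz>. Without those flags the multiply
// must be kept: %x may be NaN or Inf, and -0.0 differs from +0.0.
%zero = arith.constant 0.0 : f32
%r = arith.mulf %x, %zero fastmath<nnan,nsz> : f32
```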
diff --git a/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp b/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp
index 7626d35..c64e10f5 100644
--- a/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp
@@ -123,7 +123,8 @@ void mlir::arith::populateEmulateUnsupportedFloatsLegality(
vector::OuterProductOp, vector::ScanOp>(
[&](Operation *op) { return converter.isLegal(op); });
target.addLegalOp<arith::BitcastOp, arith::ExtFOp, arith::TruncFOp,
- arith::ConstantOp, vector::SplatOp, vector::BroadcastOp>();
+ arith::ConstantOp, arith::SelectOp, vector::SplatOp,
+ vector::BroadcastOp>();
}
void EmulateUnsupportedFloatsPass::runOnOperation() {
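With `arith::SelectOp` added to the legal set, selects on otherwise-unsupported float types are left untouched, since a select only moves values and needs no extf/truncf round-trip; for example:

```mlir
// Remains legal under the emulation pass (a sketch).
%r = arith.select %cond, %a, %b : bf16
```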
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 3f0b0ba..dd9b4c2 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -42,6 +42,7 @@
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/DebugLog.h"
#include "llvm/Support/LogicalResult.h"
@@ -273,32 +274,6 @@ void transform::ApplyFoldPackUnpackIntoEmptyPatternsOp::populatePatterns(
// BufferizeToAllocationOp
//===----------------------------------------------------------------------===//
-void transform::BufferizeToAllocationOp::build(OpBuilder &b,
- OperationState &result,
- Value target,
- Attribute memorySpace) {
- SmallVector<Type> resultTypes;
- resultTypes.push_back(b.getType<transform::AnyValueType>());
- resultTypes.push_back(b.getType<transform::AnyOpType>());
- return build(b, result,
- /*resultTypes=*/resultTypes,
- /*target=*/target,
- /*memory_space=*/memorySpace);
-}
-
-void transform::BufferizeToAllocationOp::build(OpBuilder &b,
- OperationState &result,
- Value target,
- int64_t memorySpace) {
- SmallVector<Type> resultTypes;
- resultTypes.push_back(b.getType<transform::AnyValueType>());
- resultTypes.push_back(b.getType<transform::AnyOpType>());
- return build(b, result,
- /*resultTypes=*/resultTypes,
- /*target=*/target,
- /*memory_space=*/b.getI64IntegerAttr(memorySpace));
-}
-
namespace {
class NewOpsListener : public RewriterBase::ForwardingListener {
public:
@@ -409,6 +384,95 @@ LogicalResult transform::BufferizeToAllocationOp::verify() {
}
//===----------------------------------------------------------------------===//
+// PromoteTensorOp
+//===----------------------------------------------------------------------===//
+
+/// Return true if the operand may be read from by its owner. This is currently
+/// very conservative and only looks inside linalg operations to prevent
+/// unintentional data loss.
+static bool mayBeRead(OpOperand &operand) {
+ auto linalgOp = dyn_cast<linalg::LinalgOp>(operand.getOwner());
+
+ // Be conservative about ops we cannot analyze deeper.
+ if (!linalgOp)
+ return true;
+
+ // Look inside linalg ops.
+ Value blockArgument = linalgOp.getMatchingBlockArgument(&operand);
+ return !blockArgument.use_empty();
+}
+
+/// Return true if the value may be read through any of its uses.
+static bool mayBeRead(Value value) {
+  // If the value has reference semantics, it
+  // may be read through any alias.
+ if (!isa<TensorType, FloatType, IntegerType>(value.getType()))
+ return true;
+ return llvm::any_of(value.getUses(),
+ static_cast<bool (&)(OpOperand &)>(mayBeRead));
+}
+
+DiagnosedSilenceableFailure
+transform::PromoteTensorOp::apply(transform::TransformRewriter &rewriter,
+ transform::TransformResults &results,
+ transform::TransformState &state) {
+ SmallVector<Value> promoted;
+ for (Value tensor : state.getPayloadValues(getTensor())) {
+ auto type = dyn_cast<RankedTensorType>(tensor.getType());
+ if (!type) {
+ return emitSilenceableError() << "non-tensor type: " << tensor;
+ }
+
+ Operation *definingOp = tensor.getDefiningOp();
+ if (definingOp)
+ rewriter.setInsertionPointAfter(definingOp);
+ else
+ rewriter.setInsertionPointToStart(cast<BlockArgument>(tensor).getOwner());
+
+ // Check this before we emit operations using this value.
+ bool needsMaterialization = mayBeRead(tensor);
+
+ SmallVector<Value> dynamicDims;
+ llvm::SmallPtrSet<Operation *, 4> preservedOps;
+ for (auto [pos, dim] : llvm::enumerate(type.getShape())) {
+ if (!ShapedType::isDynamic(dim))
+ continue;
+ Value cst = rewriter.create<arith::ConstantIndexOp>(tensor.getLoc(), pos);
+ auto dimOp = rewriter.create<tensor::DimOp>(tensor.getLoc(), tensor, cst);
+ preservedOps.insert(dimOp);
+ dynamicDims.push_back(dimOp);
+ }
+ auto allocation = rewriter.create<bufferization::AllocTensorOp>(
+ tensor.getLoc(), type, dynamicDims);
+ // Set memory space if provided.
+ if (getMemorySpaceAttr())
+ allocation.setMemorySpaceAttr(getMemorySpaceAttr());
+ Value allocated = allocation;
+
+ // Only insert a materialization (typically bufferizes to a copy) when the
+ // value may be read from.
+ if (needsMaterialization) {
+ auto copy = rewriter.create<bufferization::MaterializeInDestinationOp>(
+ tensor.getLoc(), tensor, allocated);
+ preservedOps.insert(copy);
+ promoted.push_back(copy.getResult());
+ } else {
+ promoted.push_back(allocated);
+ }
+ rewriter.replaceAllUsesExcept(tensor, promoted.back(), preservedOps);
+ }
+ results.setValues(cast<OpResult>(getPromoted()), promoted);
+ return DiagnosedSilenceableFailure::success();
+}
+
+void transform::PromoteTensorOp::getEffects(
+ SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
+ transform::onlyReadsHandle(getTensorMutable(), effects);
+ transform::producesHandle(getOperation()->getOpResults(), effects);
+ transform::modifiesPayload(effects);
+}
+
+//===----------------------------------------------------------------------===//
// DecomposeOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt
index ff62b51..8899c3a 100644
--- a/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt
@@ -3,6 +3,7 @@ add_mlir_dialect_library(MLIRMathTransforms
ExpandOps.cpp
ExtendToSupportedTypes.cpp
PolynomialApproximation.cpp
+ SincosFusion.cpp
UpliftToFMA.cpp
ADDITIONAL_HEADER_DIRS
diff --git a/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp b/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp
new file mode 100644
index 0000000..69407df
--- /dev/null
+++ b/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp
@@ -0,0 +1,80 @@
+//===- SincosFusion.cpp - Fuse sin/cos into sincos -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Math/IR/Math.h"
+#include "mlir/Dialect/Math/Transforms/Passes.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+using namespace mlir;
+using namespace mlir::math;
+
+namespace {
+
+/// Fuse a math.sin and math.cos in the same block that use the same operand and
+/// have identical fastmath flags into a single math.sincos.
+struct SincosFusionPattern : OpRewritePattern<math::SinOp> {
+ using Base::Base;
+
+ LogicalResult matchAndRewrite(math::SinOp sinOp,
+ PatternRewriter &rewriter) const override {
+ Value operand = sinOp.getOperand();
+ mlir::arith::FastMathFlags sinFastMathFlags = sinOp.getFastmath();
+
+ math::CosOp cosOp = nullptr;
+ sinOp->getBlock()->walk([&](math::CosOp op) {
+ if (op.getOperand() == operand && op.getFastmath() == sinFastMathFlags) {
+ cosOp = op;
+ return WalkResult::interrupt();
+ }
+ return WalkResult::advance();
+ });
+
+ if (!cosOp)
+ return failure();
+
+ Operation *firstOp = sinOp->isBeforeInBlock(cosOp) ? sinOp.getOperation()
+ : cosOp.getOperation();
+ rewriter.setInsertionPoint(firstOp);
+
+ Type elemType = sinOp.getType();
+ auto sincos = math::SincosOp::create(rewriter, firstOp->getLoc(),
+ TypeRange{elemType, elemType}, operand,
+ sinOp.getFastmathAttr());
+
+ rewriter.replaceOp(sinOp, sincos.getSin());
+ rewriter.replaceOp(cosOp, sincos.getCos());
+ return success();
+ }
+};
+
+} // namespace
+
+namespace mlir::math {
+#define GEN_PASS_DEF_MATHSINCOSFUSIONPASS
+#include "mlir/Dialect/Math/Transforms/Passes.h.inc"
+} // namespace mlir::math
+
+namespace {
+
+struct MathSincosFusionPass final
+ : math::impl::MathSincosFusionPassBase<MathSincosFusionPass> {
+ using MathSincosFusionPassBase::MathSincosFusionPassBase;
+
+ void runOnOperation() override {
+ RewritePatternSet patterns(&getContext());
+ patterns.add<SincosFusionPattern>(&getContext());
+
+ GreedyRewriteConfig config;
+ if (failed(
+ applyPatternsGreedily(getOperation(), std::move(patterns), config)))
+ return signalPassFailure();
+ }
+};
+
+} // namespace
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 349b4de..e9bdcda 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -607,6 +607,29 @@ AssumeAlignmentOp::bubbleDownCasts(OpBuilder &builder) {
}
//===----------------------------------------------------------------------===//
+// DistinctObjectsOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult DistinctObjectsOp::verify() {
+ if (getOperandTypes() != getResultTypes())
+ return emitOpError("operand types and result types must match");
+
+ if (getOperandTypes().empty())
+ return emitOpError("expected at least one operand");
+
+ return success();
+}
+
+LogicalResult DistinctObjectsOp::inferReturnTypes(
+ MLIRContext * /*context*/, std::optional<Location> /*location*/,
+ ValueRange operands, DictionaryAttr /*attributes*/,
+ OpaqueProperties /*properties*/, RegionRange /*regions*/,
+ SmallVectorImpl<Type> &inferredReturnTypes) {
+ llvm::copy(operands.getTypes(), std::back_inserter(inferredReturnTypes));
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// CastOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index f01ad05..5672942 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -33,6 +33,7 @@
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/ADT/bit.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include "llvm/Support/InterleavedRange.h"
#include <cstddef>
#include <iterator>
#include <optional>
@@ -77,6 +78,232 @@ struct LLVMPointerPointerLikeModel
};
} // namespace
+/// Generate a name for a canonical loop nest in the format
+/// `<prefix>(_r<idx>_s<idx>)*`. `_r<idx>` identifies the region index within
+/// the parent operation; it is omitted if that operation has only one region.
+/// `_s<idx>` identifies the position of an operation within a region, where
+/// only operations that may potentially contain loops ("container operations",
+/// i.e. operations with regions) are counted; it is likewise omitted if there
+/// is only one such operation in a region. For canonical loops nested inside
+/// each other, the suffix `_d<num>` may also be used, where `<num>` is the
+/// nesting depth of the loop.
+///
+/// The generated name is a best-effort attempt to make each canonical loop
+/// unique within an SSA namespace. This also means that regions with the
+/// IsolatedFromAbove property do not consider any parents or siblings.
+static std::string generateLoopNestingName(StringRef prefix,
+ CanonicalLoopOp op) {
+ struct Component {
+ /// If true, this component describes a region operand of an operation (the
+    /// If true, this component describes a region operand of an operation (the
+    /// operand's owner). If false, this component describes an operation located
+    /// in a parent region.
+ bool skip = false;
+ bool isUnique = false;
+
+ size_t idx;
+ Operation *op;
+ Region *parentRegion;
+ size_t loopDepth;
+
+ Operation *&getOwnerOp() {
+ assert(isRegionArgOfOp && "Must describe a region operand");
+ return op;
+ }
+ size_t &getArgIdx() {
+ assert(isRegionArgOfOp && "Must describe a region operand");
+ return idx;
+ }
+
+ Operation *&getContainerOp() {
+      assert(!isRegionArgOfOp && "Must describe an operation of a region");
+ return op;
+ }
+ size_t &getOpPos() {
+      assert(!isRegionArgOfOp && "Must describe an operation of a region");
+ return idx;
+ }
+ bool isLoopOp() const {
+      assert(!isRegionArgOfOp && "Must describe an operation of a region");
+ return isa<CanonicalLoopOp>(op);
+ }
+ Region *&getParentRegion() {
+ assert(!isRegionArgOfOp && "Must describe an operation of a region");
+ return parentRegion;
+ }
+ size_t &getLoopDepth() {
+ assert(!isRegionArgOfOp && "Must describe an operation of a region");
+ return loopDepth;
+ }
+
+ void skipIf(bool v = true) { skip = skip || v; }
+ };
+
+ // List of ancestors, from inner to outer.
+ // Alternates between
+ // * region argument of an operation
+ // * operation within a region
+ SmallVector<Component> components;
+
+ // Gather a list of parent regions and operations, and the position within
+ // their parent
+ Operation *o = op.getOperation();
+ while (o) {
+ // Operation within a region
+ Region *r = o->getParentRegion();
+ if (!r)
+ break;
+
+ llvm::ReversePostOrderTraversal<Block *> traversal(&r->getBlocks().front());
+ size_t idx = 0;
+ bool found = false;
+ size_t sequentialIdx = -1;
+ bool isOnlyContainerOp = true;
+ for (Block *b : traversal) {
+ for (Operation &op : *b) {
+ if (&op == o && !found) {
+ sequentialIdx = idx;
+ found = true;
+ }
+ if (op.getNumRegions()) {
+ idx += 1;
+ if (idx > 1)
+ isOnlyContainerOp = false;
+ }
+ if (found && !isOnlyContainerOp)
+ break;
+ }
+ }
+
+ Component &containerOpInRegion = components.emplace_back();
+ containerOpInRegion.isRegionArgOfOp = false;
+ containerOpInRegion.isUnique = isOnlyContainerOp;
+ containerOpInRegion.getContainerOp() = o;
+ containerOpInRegion.getOpPos() = sequentialIdx;
+ containerOpInRegion.getParentRegion() = r;
+
+ Operation *parent = r->getParentOp();
+
+ // Region argument of an operation
+ Component &regionArgOfOperation = components.emplace_back();
+ regionArgOfOperation.isRegionArgOfOp = true;
+ regionArgOfOperation.isUnique = true;
+ regionArgOfOperation.getArgIdx() = 0;
+ regionArgOfOperation.getOwnerOp() = parent;
+
+ // The IsolatedFromAbove trait of the parent operation implies that each
+ // individual region argument has its own separate namespace, so no
+ // ambiguity.
+ if (!parent || parent->hasTrait<mlir::OpTrait::IsIsolatedFromAbove>())
+ break;
+
+ // This component is only needed if the operation has multiple regions.
+ // Regions may be optional, but we currently do not consider this.
+ if (parent->getRegions().size() > 1) {
+ auto getRegionIndex = [](Operation *o, Region *r) {
+ for (auto [idx, region] : llvm::enumerate(o->getRegions())) {
+ if (&region == r)
+ return idx;
+ }
+ llvm_unreachable("Region not child of its parent operation");
+ };
+ regionArgOfOperation.isUnique = false;
+ regionArgOfOperation.getArgIdx() = getRegionIndex(parent, r);
+ }
+
+ // next parent
+ o = parent;
+ }
+
+ // Determine whether a region-argument component is not needed
+ for (Component &c : components)
+ c.skipIf(c.isRegionArgOfOp && c.isUnique);
+
+ // Find runs of nested loops and determine each loop's depth in the loop nest
+ size_t numSurroundingLoops = 0;
+ for (Component &c : llvm::reverse(components)) {
+ if (c.skip)
+ continue;
+
+ // Non-skipped region-argument components interrupt the loop nest
+ if (c.isRegionArgOfOp) {
+ numSurroundingLoops = 0;
+ continue;
+ }
+
+ // If a region contains multiple loops, each of them is the outermost loop
+ // of a new loop nest
+ if (!c.isUnique)
+ numSurroundingLoops = 0;
+
+ c.getLoopDepth() = numSurroundingLoops;
+
+ // Next loop is surrounded by one more loop
+ if (isa<CanonicalLoopOp>(c.getContainerOp()))
+ numSurroundingLoops += 1;
+ }
+
+ // In loop nests, skip all but the innermost loop, which carries the
+ // nesting-depth number
+ bool isLoopNest = false;
+ for (Component &c : components) {
+ if (c.skip || c.isRegionArgOfOp)
+ continue;
+
+ if (!isLoopNest && c.getLoopDepth() >= 1) {
+ // Innermost loop of a loop nest of at least two loops
+ isLoopNest = true;
+ } else if (isLoopNest) {
+ // Non-innermost loop of a loop nest
+ c.skipIf(c.isUnique);
+
+ // If there is no surrounding loop left, this must have been the outermost
+ // loop; leave loop-nest mode for the next iteration
+ if (c.getLoopDepth() == 0)
+ isLoopNest = false;
+ }
+ }
+
+ // Skip unambiguous non-loop container operations (they should still
+ // interrupt loop nests, so we mark them as skipped only after computing the
+ // loop nests)
+ for (Component &c : components)
+ c.skipIf(!c.isRegionArgOfOp && c.isUnique &&
+ !isa<CanonicalLoopOp>(c.getContainerOp()));
+
+ // Components can be skipped if they are already disambiguated by their parent
+ // (or do not have a parent)
+ bool newRegion = true;
+ for (Component &c : llvm::reverse(components)) {
+ c.skipIf(newRegion && c.isUnique);
+
+ // non-skipped components disambiguate unique children
+ if (!c.skip)
+ newRegion = true;
+
+ // ...except canonical loops that need a suffix for each nest
+ if (!c.isRegionArgOfOp && c.getContainerOp())
+ newRegion = false;
+ }
+
+ // Compile the nesting name string
+ SmallString<64> Name{prefix};
+ llvm::raw_svector_ostream NameOS(Name);
+ for (auto &c : llvm::reverse(components)) {
+ if (c.skip)
+ continue;
+
+ if (c.isRegionArgOfOp)
+ NameOS << "_r" << c.getArgIdx();
+ else if (c.getLoopDepth() >= 1)
+ NameOS << "_d" << c.getLoopDepth();
+ else
+ NameOS << "_s" << c.getOpPos();
+ }
+
+ return NameOS.str().str();
+}
+
void OpenMPDialect::initialize() {
addOperations<
#define GET_OP_LIST
@@ -182,7 +409,7 @@ static ParseResult parseClauseAttr(AsmParser &parser, ClauseAttr &attr) {
}
template <typename ClauseAttr>
-void printClauseAttr(OpAsmPrinter &p, Operation *op, ClauseAttr attr) {
+static void printClauseAttr(OpAsmPrinter &p, Operation *op, ClauseAttr attr) {
p << stringifyEnum(attr.getValue());
}
@@ -1511,8 +1738,8 @@ static LogicalResult verifySynchronizationHint(Operation *op, uint64_t hint) {
//===----------------------------------------------------------------------===//
// Helper function to get bitwise AND of `value` and 'flag'
-uint64_t mapTypeToBitFlag(uint64_t value,
- llvm::omp::OpenMPOffloadMappingFlags flag) {
+static uint64_t mapTypeToBitFlag(uint64_t value,
+ llvm::omp::OpenMPOffloadMappingFlags flag) {
return value & llvm::to_underlying(flag);
}
@@ -3159,6 +3386,9 @@ void NewCliOp::getAsmResultNames(OpAsmSetValueNameFn setNameFn) {
Value result = getResult();
auto [newCli, gen, cons] = decodeCli(result);
+ // Structured binding `gen` cannot be captured in lambdas before C++20
+ OpOperand *generator = gen;
+
// Derive the CLI variable name from its generator:
// * "canonloop" for omp.canonical_loop
// * custom name for loop transformation generatees
@@ -3172,71 +3402,29 @@ void NewCliOp::getAsmResultNames(OpAsmSetValueNameFn setNameFn) {
cliName =
TypeSwitch<Operation *, std::string>(gen->getOwner())
.Case([&](CanonicalLoopOp op) {
- // Find the canonical loop nesting: For each ancestor add a
- // "+_r<idx>" suffix (in reverse order)
- SmallVector<std::string> components;
- Operation *o = op.getOperation();
- while (o) {
- if (o->hasTrait<mlir::OpTrait::IsIsolatedFromAbove>())
- break;
-
- Region *r = o->getParentRegion();
- if (!r)
- break;
-
- auto getSequentialIndex = [](Region *r, Operation *o) {
- llvm::ReversePostOrderTraversal<Block *> traversal(
- &r->getBlocks().front());
- size_t idx = 0;
- for (Block *b : traversal) {
- for (Operation &op : *b) {
- if (&op == o)
- return idx;
- // Only consider operations that are containers as
- // possible children
- if (!op.getRegions().empty())
- idx += 1;
- }
- }
- llvm_unreachable("Operation not part of the region");
- };
- size_t sequentialIdx = getSequentialIndex(r, o);
- components.push_back(("s" + Twine(sequentialIdx)).str());
-
- Operation *parent = r->getParentOp();
- if (!parent)
- break;
-
- // If the operation has more than one region, also count in
- // which of the regions
- if (parent->getRegions().size() > 1) {
- auto getRegionIndex = [](Operation *o, Region *r) {
- for (auto [idx, region] :
- llvm::enumerate(o->getRegions())) {
- if (&region == r)
- return idx;
- }
- llvm_unreachable("Region not child its parent operation");
- };
- size_t regionIdx = getRegionIndex(parent, r);
- components.push_back(("r" + Twine(regionIdx)).str());
- }
-
- // next parent
- o = parent;
- }
-
- SmallString<64> Name("canonloop");
- for (const std::string &s : reverse(components)) {
- Name += '_';
- Name += s;
- }
-
- return Name;
+ return generateLoopNestingName("canonloop", op);
})
.Case([&](UnrollHeuristicOp op) -> std::string {
llvm_unreachable("heuristic unrolling does not generate a loop");
})
+ .Case([&](TileOp op) -> std::string {
+ auto [generateesFirst, generateesCount] =
+ op.getGenerateesODSOperandIndexAndLength();
+ unsigned firstGrid = generateesFirst;
+ unsigned firstIntratile = generateesFirst + generateesCount / 2;
+ unsigned end = generateesFirst + generateesCount;
+ unsigned opnum = generator->getOperandNumber();
+ // In the OpenMP apply and looprange clauses, indices are 1-based
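+ // For example, with two applyees there are four generatees, named
+ // grid1, grid2, intratile1, intratile2 (in operand order).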
+ if (firstGrid <= opnum && opnum < firstIntratile) {
+ unsigned gridnum = opnum - firstGrid + 1;
+ return ("grid" + Twine(gridnum)).str();
+ }
+ if (firstIntratile <= opnum && opnum < end) {
+ unsigned intratilenum = opnum - firstIntratile + 1;
+ return ("intratile" + Twine(intratilenum)).str();
+ }
+ llvm_unreachable("Unexpected generatee argument");
+ })
.Default([&](Operation *op) {
assert(false && "TODO: Custom name for this operation");
return "transformed";
@@ -3323,7 +3511,8 @@ void CanonicalLoopOp::getAsmBlockNames(OpAsmSetBlockNameFn setNameFn) {
void CanonicalLoopOp::getAsmBlockArgumentNames(Region &region,
OpAsmSetValueNameFn setNameFn) {
- setNameFn(region.getArgument(0), "iv");
+ std::string ivName = generateLoopNestingName("iv", *this);
+ setNameFn(region.getArgument(0), ivName);
}
void CanonicalLoopOp::print(OpAsmPrinter &p) {
@@ -3465,6 +3654,138 @@ UnrollHeuristicOp::getGenerateesODSOperandIndexAndLength() {
}
//===----------------------------------------------------------------------===//
+// TileOp
+//===----------------------------------------------------------------------===//
+
+static void printLoopTransformClis(OpAsmPrinter &p, TileOp op,
+ OperandRange generatees,
+ OperandRange applyees) {
+ if (!generatees.empty())
+ p << '(' << llvm::interleaved(generatees) << ')';
+
+ if (!applyees.empty())
+ p << " <- (" << llvm::interleaved(applyees) << ')';
+}
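+
+// For illustration, the two printed forms are (SSA names invented for this
+// example):
+//   (%grid1, %intratile1) <- (%canonloop)   // generatees present
+//   <- (%canonloop)                         // generatees omitted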
+
+static ParseResult parseLoopTransformClis(
+ OpAsmParser &parser,
+ SmallVectorImpl<OpAsmParser::UnresolvedOperand> &generateesOperands,
+ SmallVectorImpl<OpAsmParser::UnresolvedOperand> &applyeesOperands) {
+ if (parser.parseOptionalLess()) {
+ // Syntax 1: generatees present
+
+ if (parser.parseOperandList(generateesOperands,
+ mlir::OpAsmParser::Delimiter::Paren))
+ return failure();
+
+ if (parser.parseLess())
+ return failure();
+ } else {
+ // Syntax 2: generatees omitted
+ }
+
+ // Parse `<-` (`<` has already been parsed)
+ if (parser.parseMinus())
+ return failure();
+
+ if (parser.parseOperandList(applyeesOperands,
+ mlir::OpAsmParser::Delimiter::Paren))
+ return failure();
+
+ return success();
+}
+
+LogicalResult TileOp::verify() {
+ if (getApplyees().empty())
+ return emitOpError() << "must apply to at least one loop";
+
+ if (getSizes().size() != getApplyees().size())
+ return emitOpError() << "there must be one tile size for each applyee";
+
+ if (!getGeneratees().empty() &&
+ 2 * getSizes().size() != getGeneratees().size())
+ return emitOpError()
+ << "expecting two times the number of generatees than applyees";
+
+ DenseSet<Value> parentIVs;
+
+ Value parent = getApplyees().front();
+ for (auto &&applyee : llvm::drop_begin(getApplyees())) {
+ auto [parentCreate, parentGen, parentCons] = decodeCli(parent);
+ auto [create, gen, cons] = decodeCli(applyee);
+
+ if (!parentGen)
+ return emitOpError() << "applyee CLI has no generator";
+
+ auto parentLoop = dyn_cast_or_null<CanonicalLoopOp>(parentGen->getOwner());
+ if (!parentLoop)
+ return emitOpError()
+ << "currently only supports omp.canonical_loop as applyee";
+
+ parentIVs.insert(parentLoop.getInductionVar());
+
+ if (!gen)
+ return emitOpError() << "applyee CLI has no generator";
+ auto loop = dyn_cast_or_null<CanonicalLoopOp>(gen->getOwner());
+ if (!loop)
+ return emitOpError()
+ << "currently only supports omp.canonical_loop as applyee";
+
+ // The canonical loops must be perfectly nested, i.e. the body of the parent
+ // must contain only the omp.canonical_loop of the nested loop and the
+ // omp.terminator
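+ //
+ // For illustration (mirroring the syntax of the example further below, SSA
+ // names invented):
+ //   omp.canonical_loop(%outer) %iv1 : i32 in range(%tc1) {
+ //     omp.canonical_loop(%inner) %iv2 : i32 in range(%tc2) {
+ //       omp.terminator
+ //     }
+ //     omp.terminator
+ //   }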
+ bool isPerfectlyNested = [&]() {
+ auto &parentBody = parentLoop.getRegion();
+ if (!parentBody.hasOneBlock())
+ return false;
+ auto &parentBlock = parentBody.getBlocks().front();
+
+ auto nestedLoopIt = parentBlock.begin();
+ if (nestedLoopIt == parentBlock.end() ||
+ (&*nestedLoopIt != loop.getOperation()))
+ return false;
+
+ auto termIt = std::next(nestedLoopIt);
+ if (termIt == parentBlock.end() || !isa<TerminatorOp>(termIt))
+ return false;
+
+ if (std::next(termIt) != parentBlock.end())
+ return false;
+
+ return true;
+ }();
+ if (!isPerfectlyNested)
+ return emitOpError() << "tiled loop nest must be perfectly nested";
+
+ if (parentIVs.contains(loop.getTripCount()))
+ return emitOpError() << "tiled loop nest must be rectangular";
+
+ parent = applyee;
+ }
+
+ // TODO: The tile sizes must be computed before the loop, but checking this
+ // requires dominance analysis. For instance:
+ //
+ // %canonloop = omp.new_cli
+ // omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
+ // // write to %x
+ // omp.terminator
+ // }
+ // %ts = llvm.load %x
+ // omp.tile <- (%canonloop) sizes(%ts : i32)
+
+ return success();
+}
+
+std::pair<unsigned, unsigned> TileOp::getApplyeesODSOperandIndexAndLength() {
+ return getODSOperandIndexAndLength(odsIndex_applyees);
+}
+
+std::pair<unsigned, unsigned> TileOp::getGenerateesODSOperandIndexAndLength() {
+ return getODSOperandIndexAndLength(odsIndex_generatees);
+}
+
+//===----------------------------------------------------------------------===//
// Critical construct (2.17.1)
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
index 132ed81..3385b2a 100644
--- a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
+++ b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
@@ -616,11 +616,10 @@ DiagnosedSilenceableFailure transform::ApplyConversionPatternsOp::apply(
if (diag.succeeded()) {
// Tracking failure is the only failure.
return trackingFailure;
- } else {
- diag.attachNote() << "tracking listener also failed: "
- << trackingFailure.getMessage();
- (void)trackingFailure.silence();
}
+ diag.attachNote() << "tracking listener also failed: "
+ << trackingFailure.getMessage();
+ (void)trackingFailure.silence();
}
if (!diag.succeeded())
diff --git a/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp b/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp
index 842e880..c627158 100644
--- a/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp
+++ b/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp
@@ -6,13 +6,24 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/Dialect/Transform/IR/TransformOps.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
+#include "mlir/IR/OpImplementation.h"
#include "llvm/Support/Debug.h"
#include "mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h"
using namespace mlir;
+static ParseResult parseAlternativesOpSelectedRegion(
+ OpAsmParser &parser, IntegerAttr &selectedRegionAttr,
+ std::optional<OpAsmParser::UnresolvedOperand> &selectedRegionParam);
+
+static void printAlternativesOpSelectedRegion(OpAsmPrinter &printer,
+ Operation *op,
+ IntegerAttr selectedRegionAttr,
+ Value selectedRegionParam);
+
#define GET_OP_CLASSES
#include "mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp.inc"
@@ -57,3 +68,176 @@ LogicalResult transform::tune::KnobOp::verify() {
return success();
}
+
+//===----------------------------------------------------------------------===//
+// AlternativesOp
+//===----------------------------------------------------------------------===//
+
+static ParseResult parseAlternativesOpSelectedRegion(
+ OpAsmParser &parser, IntegerAttr &selectedRegionAttr,
+ std::optional<OpAsmParser::UnresolvedOperand> &selectedRegionParam) {
+ size_t selectedRegionIdx;
+ OptionalParseResult attrParseRes =
+ parser.parseOptionalInteger(selectedRegionIdx);
+ if (attrParseRes.has_value()) {
+ if (failed(*attrParseRes))
+ return failure();
+
+ selectedRegionAttr = parser.getBuilder().getIndexAttr(selectedRegionIdx);
+ return success();
+ }
+
+ OpAsmParser::UnresolvedOperand param;
+ auto paramParseRes = parser.parseOptionalOperand(param);
+ if (paramParseRes.has_value()) {
+ if (failed(*paramParseRes))
+ return failure();
+
+ selectedRegionParam = param;
+ return success();
+ }
+
+ return parser.emitError(parser.getCurrentLocation())
+ << "expected either an integer attribute or a transform.param operand";
+}
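+
+// For illustration, this custom directive accepts either a literal region
+// index or a param operand, e.g. (assuming the enclosing assembly format
+// prints it as `selected_region = ...`; the operand name is invented):
+//   selected_region = 0
+//   selected_region = %region_idx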
+
+static void printAlternativesOpSelectedRegion(OpAsmPrinter &printer,
+ Operation *op,
+ IntegerAttr selectedRegionAttr,
+ Value selectedRegionParam) {
+ if (selectedRegionAttr)
+ printer << selectedRegionAttr.getValue();
+ if (selectedRegionParam)
+ printer << selectedRegionParam;
+}
+
+OperandRange transform::tune::AlternativesOp::getEntrySuccessorOperands(
+ RegionBranchPoint point) {
+ // No operands will be forwarded to the region(s).
+ return getOperands().slice(0, 0);
+}
+
+void transform::tune::AlternativesOp::getSuccessorRegions(
+ RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> &regions) {
+ if (point.isParent())
+ if (auto selectedRegionIdx = getSelectedRegionAttr())
+ regions.emplace_back(
+ &getAlternatives()[selectedRegionIdx->getSExtValue()],
+ Block::BlockArgListType());
+ else
+ for (Region &alternative : getAlternatives())
+ regions.emplace_back(&alternative, Block::BlockArgListType());
+ else
+ regions.emplace_back(getOperation()->getResults());
+}
+
+void transform::tune::AlternativesOp::getRegionInvocationBounds(
+ ArrayRef<Attribute> operands, SmallVectorImpl<InvocationBounds> &bounds) {
+ (void)operands;
+ bounds.reserve(getNumRegions());
+
+ if (auto selectedRegionIdx = getSelectedRegionAttr()) {
+ bounds.resize(getNumRegions(), InvocationBounds(0, 0));
+ bounds[selectedRegionIdx->getSExtValue()] = InvocationBounds(1, 1);
+ } else {
+ bounds.resize(getNumRegions(), InvocationBounds(0, 1));
+ }
+}
+
+void transform::tune::AlternativesOp::getEffects(
+ SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
+ onlyReadsHandle(getSelectedRegionParamMutable(), effects);
+ producesHandle(getOperation()->getOpResults(), effects);
+ // TODO: should effects from regions be forwarded?
+}
+
+DiagnosedSilenceableFailure
+transform::tune::AlternativesOp::apply(transform::TransformRewriter &rewriter,
+ transform::TransformResults &results,
+ transform::TransformState &state) {
+ std::optional<size_t> selectedRegionIdx;
+
+ if (auto selectedRegionAttr = getSelectedRegionAttr())
+ selectedRegionIdx = selectedRegionAttr->getSExtValue();
+
+ if (Value selectedRegionParam = getSelectedRegionParam()) {
+ ArrayRef<Attribute> associatedAttrs = state.getParams(selectedRegionParam);
+ IntegerAttr selectedRegionAttr;
+ if (associatedAttrs.size() != 1 ||
+ !(selectedRegionAttr = dyn_cast<IntegerAttr>(associatedAttrs[0])))
+ return emitDefiniteFailure()
+ << "param should hold exactly one integer attribute, got: "
+ << associatedAttrs[0];
+ selectedRegionIdx = selectedRegionAttr.getValue().getSExtValue();
+ }
+
+ if (!selectedRegionIdx)
+ return emitDefiniteFailure() << "non-deterministic choice " << getName()
+ << " is only resolved through providing a "
+ "`selected_region` attr/param";
+
+ if (*selectedRegionIdx >= getNumRegions())
+ return emitDefiniteFailure()
+ << "'selected_region' attribute/param specifies region at index "
+ << *selectedRegionIdx << " while op has only " << getNumRegions()
+ << " regions";
+
+ Region &selectedRegion = getRegion(*selectedRegionIdx);
+ auto scope = state.make_region_scope(selectedRegion);
+ Block &block = selectedRegion.front();
+ // Apply the region's ops one by one.
+ for (Operation &transform : block.without_terminator()) {
+ DiagnosedSilenceableFailure result =
+ state.applyTransform(cast<transform::TransformOpInterface>(transform));
+ if (result.isDefiniteFailure())
+ return result;
+
+ if (result.isSilenceableFailure()) {
+ for (const auto &res : getResults())
+ results.set(res, {});
+ return result;
+ }
+ }
+ // Forward the operation mapping for values yielded from the region to the
+ // values produced by the alternatives op.
+ transform::detail::forwardTerminatorOperands(&block, state, results);
+ return DiagnosedSilenceableFailure::success();
+}
+
+LogicalResult transform::tune::AlternativesOp::verify() {
+ for (auto *region : getRegions()) {
+ auto yieldTerminator =
+ llvm::dyn_cast_if_present<transform::YieldOp>(region->front().back());
+ if (!yieldTerminator)
+ return emitOpError() << "expected '"
+ << transform::YieldOp::getOperationName()
+ << "' as terminator";
+
+ if (yieldTerminator->getNumOperands() != getNumResults())
+ return yieldTerminator.emitOpError()
+ << "expected terminator to have as many operands as the parent op "
+ "has results";
+
+ for (auto [i, operandType, resultType] : llvm::zip_equal(
+ llvm::seq<unsigned>(0, yieldTerminator->getNumOperands()),
+ yieldTerminator->getOperands().getType(), getResultTypes())) {
+ if (operandType == resultType)
+ continue;
+ return yieldTerminator.emitOpError()
+ << "the type of the terminator operand #" << i
+ << " must match the type of the corresponding parent op result ("
+ << operandType << " vs " << resultType << ")";
+ }
+ }
+
+ if (auto selectedRegionAttr = getSelectedRegionAttr()) {
+ size_t regionIdx = selectedRegionAttr->getSExtValue();
+ if (regionIdx >= getNumRegions())
+ return emitOpError()
+ << "'selected_region' attribute specifies region at index "
+ << regionIdx << " while op has only " << getNumRegions()
+ << " regions";
+ }
+
+ return success();
+}
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index eb46869..b0132e8 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -580,7 +580,7 @@ namespace {
// ElideSingleElementReduction for ReduceOp.
struct ElideUnitDimsInMultiDimReduction
: public OpRewritePattern<MultiDimReductionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(MultiDimReductionOp reductionOp,
PatternRewriter &rewriter) const override {
@@ -730,7 +730,7 @@ std::optional<SmallVector<int64_t, 4>> ReductionOp::getShapeForUnroll() {
namespace {
struct ElideSingleElementReduction : public OpRewritePattern<ReductionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ReductionOp reductionOp,
PatternRewriter &rewriter) const override {
@@ -2197,7 +2197,7 @@ namespace {
// Pattern to rewrite a ExtractOp(Broadcast) -> Broadcast.
class ExtractOpFromBroadcast final : public OpRewritePattern<ExtractOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ExtractOp extractOp,
PatternRewriter &rewriter) const override {
@@ -2220,7 +2220,7 @@ public:
// Pattern to rewrite a ExtractOp(CreateMask) -> CreateMask.
class ExtractOpFromCreateMask final : public OpRewritePattern<ExtractOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ExtractOp extractOp,
PatternRewriter &rewriter) const override {
@@ -2546,7 +2546,7 @@ rewriteFromElementsAsBroadcast(FromElementsOp fromElementsOp,
class FromElementsToShapeCast : public OpRewritePattern<FromElementsOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(FromElementsOp fromElements,
PatternRewriter &rewriter) const override {
@@ -2938,7 +2938,7 @@ namespace {
// Fold broadcast1(broadcast2(x)) into broadcast1(x).
struct BroadcastFolder : public OpRewritePattern<BroadcastOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(BroadcastOp broadcastOp,
PatternRewriter &rewriter) const override {
@@ -3109,7 +3109,7 @@ namespace {
// Pattern to rewrite a 0-D shuffle with [0] or [1] mask returning a 1-D vector
// to a broadcast.
struct Canonicalize0DShuffleOp : public OpRewritePattern<ShuffleOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ShuffleOp shuffleOp,
PatternRewriter &rewriter) const override {
@@ -3165,7 +3165,7 @@ static Value getScalarSplatSource(Value value) {
/// Pattern to rewrite shuffle(splat-like(v), splat-like(v)) as broadcast(v).
class ShuffleSplat final : public OpRewritePattern<ShuffleOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ShuffleOp op,
PatternRewriter &rewriter) const override {
@@ -3182,7 +3182,7 @@ public:
/// vector.interleave.
class ShuffleInterleave : public OpRewritePattern<ShuffleOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ShuffleOp op,
PatternRewriter &rewriter) const override {
@@ -3326,7 +3326,7 @@ namespace {
// broadcast.
class InsertToBroadcast final : public OpRewritePattern<InsertOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(InsertOp insertOp,
PatternRewriter &rewriter) const override {
@@ -3344,7 +3344,7 @@ public:
/// Pattern to rewrite a insert(splat-like(v), splat-like(v)) as broadcast(v).
class InsertSplatToSplat final : public OpRewritePattern<InsertOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(InsertOp op,
PatternRewriter &rewriter) const override {
@@ -3380,7 +3380,7 @@ public:
/// %result = vector.from_elements %c1, %c2 : vector<2xi32>
class InsertChainFullyInitialized final : public OpRewritePattern<InsertOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(InsertOp op,
PatternRewriter &rewriter) const override {
@@ -3748,7 +3748,7 @@ namespace {
class FoldInsertStridedSliceSplat final
: public OpRewritePattern<InsertStridedSliceOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(InsertStridedSliceOp insertStridedSliceOp,
PatternRewriter &rewriter) const override {
@@ -3768,7 +3768,7 @@ public:
class FoldInsertStridedSliceOfExtract final
: public OpRewritePattern<InsertStridedSliceOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(InsertStridedSliceOp insertStridedSliceOp,
PatternRewriter &rewriter) const override {
@@ -3798,7 +3798,7 @@ public:
class InsertStridedSliceConstantFolder final
: public OpRewritePattern<InsertStridedSliceOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
// Do not create constants with more than `vectorSizeFoldThreashold` elements,
// unless the source vector constant has a single use.
@@ -4250,7 +4250,7 @@ namespace {
// %mask = vector.create_mask %new_ub : vector<8xi1>
class StridedSliceCreateMaskFolder final
: public OpRewritePattern<ExtractStridedSliceOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
public:
LogicalResult matchAndRewrite(ExtractStridedSliceOp extractStridedSliceOp,
@@ -4310,7 +4310,7 @@ public:
class StridedSliceConstantMaskFolder final
: public OpRewritePattern<ExtractStridedSliceOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ExtractStridedSliceOp extractStridedSliceOp,
PatternRewriter &rewriter) const override {
@@ -4365,7 +4365,7 @@ public:
class StridedSliceBroadcast final
: public OpRewritePattern<ExtractStridedSliceOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
PatternRewriter &rewriter) const override {
@@ -4416,7 +4416,7 @@ public:
/// Rewrite extract_strided_slice(splat-like(v)) with broadcast(v).
class StridedSliceSplat final : public OpRewritePattern<ExtractStridedSliceOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
PatternRewriter &rewriter) const override {
@@ -4448,7 +4448,7 @@ public:
class ContiguousExtractStridedSliceToExtract final
: public OpRewritePattern<ExtractStridedSliceOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
PatternRewriter &rewriter) const override {
@@ -5023,7 +5023,7 @@ namespace {
/// ```
struct TransferReadAfterWriteToBroadcast
: public OpRewritePattern<TransferReadOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(TransferReadOp readOp,
PatternRewriter &rewriter) const override {
@@ -5458,7 +5458,7 @@ namespace {
/// any other uses.
class FoldWaw final : public OpRewritePattern<TransferWriteOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(TransferWriteOp writeOp,
PatternRewriter &rewriter) const override {
if (!llvm::isa<RankedTensorType>(writeOp.getShapedType()))
@@ -5514,7 +5514,7 @@ public:
struct SwapExtractSliceOfTransferWrite
: public OpRewritePattern<tensor::InsertSliceOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(tensor::InsertSliceOp insertOp,
PatternRewriter &rewriter) const override {
@@ -5737,7 +5737,7 @@ LogicalResult MaskedLoadOp::verify() {
namespace {
class MaskedLoadFolder final : public OpRewritePattern<MaskedLoadOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(MaskedLoadOp load,
PatternRewriter &rewriter) const override {
switch (getMaskFormat(load.getMask())) {
@@ -5794,7 +5794,7 @@ LogicalResult MaskedStoreOp::verify() {
namespace {
class MaskedStoreFolder final : public OpRewritePattern<MaskedStoreOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(MaskedStoreOp store,
PatternRewriter &rewriter) const override {
switch (getMaskFormat(store.getMask())) {
@@ -5890,7 +5890,7 @@ static LogicalResult isZeroBasedContiguousSeq(Value indexVec) {
namespace {
class GatherFolder final : public OpRewritePattern<GatherOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(GatherOp gather,
PatternRewriter &rewriter) const override {
switch (getMaskFormat(gather.getMask())) {
@@ -5910,7 +5910,7 @@ public:
/// maskedload. Only 1D fixed vectors are supported for now.
class FoldContiguousGather final : public OpRewritePattern<GatherOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(GatherOp op,
PatternRewriter &rewriter) const override {
if (!isa<MemRefType>(op.getBase().getType()))
@@ -5962,7 +5962,7 @@ LogicalResult ScatterOp::verify() {
namespace {
class ScatterFolder final : public OpRewritePattern<ScatterOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ScatterOp scatter,
PatternRewriter &rewriter) const override {
switch (getMaskFormat(scatter.getMask())) {
@@ -5982,7 +5982,7 @@ public:
/// maskedstore. Only 1D fixed vectors are supported for now.
class FoldContiguousScatter final : public OpRewritePattern<ScatterOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ScatterOp op,
PatternRewriter &rewriter) const override {
if (failed(isZeroBasedContiguousSeq(op.getIndices())))
@@ -6030,7 +6030,7 @@ LogicalResult ExpandLoadOp::verify() {
namespace {
class ExpandLoadFolder final : public OpRewritePattern<ExpandLoadOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ExpandLoadOp expand,
PatternRewriter &rewriter) const override {
switch (getMaskFormat(expand.getMask())) {
@@ -6081,7 +6081,7 @@ LogicalResult CompressStoreOp::verify() {
namespace {
class CompressStoreFolder final : public OpRewritePattern<CompressStoreOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(CompressStoreOp compress,
PatternRewriter &rewriter) const override {
switch (getMaskFormat(compress.getMask())) {
@@ -6260,7 +6260,7 @@ static VectorType trimTrailingOneDims(VectorType oldType) {
class ShapeCastCreateMaskFolderTrailingOneDim final
: public OpRewritePattern<ShapeCastOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ShapeCastOp shapeOp,
PatternRewriter &rewriter) const override {
@@ -6330,7 +6330,7 @@ public:
/// If both (i) and (ii) are possible, (i) is chosen.
class ShapeCastBroadcastFolder final : public OpRewritePattern<ShapeCastOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ShapeCastOp shapeCastOp,
PatternRewriter &rewriter) const override {
@@ -6614,7 +6614,7 @@ namespace {
// Rewrites two back-to-back TransposeOp operations into a single TransposeOp.
class TransposeFolder final : public OpRewritePattern<vector::TransposeOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::TransposeOp transposeOp,
PatternRewriter &rewriter) const override {
@@ -6646,7 +6646,7 @@ public:
/// Replace transpose(splat-like(v)) with broadcast(v)
class FoldTransposeSplat final : public OpRewritePattern<TransposeOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(TransposeOp transposeOp,
PatternRewriter &rewriter) const override {
@@ -6663,7 +6663,7 @@ public:
/// Folds transpose(create_mask) into a new transposed create_mask.
class FoldTransposeCreateMask final : public OpRewritePattern<TransposeOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(TransposeOp transpOp,
PatternRewriter &rewriter) const override {
@@ -6700,7 +6700,7 @@ public:
/// Folds transpose(shape_cast) into a new shape_cast.
class FoldTransposeShapeCast final : public OpRewritePattern<TransposeOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(TransposeOp transposeOp,
PatternRewriter &rewriter) const override {
@@ -6750,7 +6750,7 @@ public:
/// within the groups [0,1] and [3,4], like (1 0 2 4 3 5 6).
class FoldTransposeBroadcast : public OpRewritePattern<vector::TransposeOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
FoldTransposeBroadcast(MLIRContext *context, PatternBenefit benefit = 1)
: OpRewritePattern<vector::TransposeOp>(context, benefit) {}
@@ -6971,7 +6971,7 @@ namespace {
/// %0 = vector.constant_mask [8, 16] : vector<8x[16]xi1>
class CreateMaskFolder final : public OpRewritePattern<CreateMaskOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(CreateMaskOp createMaskOp,
PatternRewriter &rewriter) const override {
@@ -7300,7 +7300,7 @@ LogicalResult MaskOp::fold(FoldAdaptor adaptor,
/// %0 = arith.select %mask, %a, %passthru : vector<8xf32>
///
class CanonializeEmptyMaskOp : public OpRewritePattern<MaskOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(MaskOp maskOp,
PatternRewriter &rewriter) const override {
@@ -7410,7 +7410,7 @@ OpFoldResult SplatOp::fold(FoldAdaptor adaptor) {
// vector.broadcast.
class SplatToBroadcastPattern final : public OpRewritePattern<SplatOp> {
public:
- using OpRewritePattern<SplatOp>::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(SplatOp splatOp,
PatternRewriter &rewriter) const override {
rewriter.replaceOpWithNewOp<vector::BroadcastOp>(splatOp, splatOp.getType(),
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp
index dedc3b3..61d9357 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp
@@ -34,7 +34,7 @@ namespace {
/// convertible to the lower level target dialect (LLVM, SPIR-V, etc.) directly.
class BroadcastOpLowering : public OpRewritePattern<vector::BroadcastOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::BroadcastOp op,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp
index 65702ff..efe8d14 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp
@@ -1151,7 +1151,7 @@ FailureOr<Value> ContractionOpLowering::lowerReduction(
///
class OuterProductOpLowering : public OpRewritePattern<vector::OuterProductOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::OuterProductOp op,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp
index 1f96a3a..6bc8347 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp
@@ -50,7 +50,7 @@ namespace {
///
/// Supports vector types with a fixed leading dimension.
struct UnrollGather : OpRewritePattern<vector::GatherOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::GatherOp op,
PatternRewriter &rewriter) const override {
@@ -98,7 +98,7 @@ struct UnrollGather : OpRewritePattern<vector::GatherOp> {
/// ATM this is effectively limited to reading a 1D Vector from a 2D MemRef,
/// but should be fairly straightforward to extend beyond that.
struct RemoveStrideFromGatherSource : OpRewritePattern<vector::GatherOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::GatherOp op,
PatternRewriter &rewriter) const override {
@@ -164,7 +164,7 @@ struct RemoveStrideFromGatherSource : OpRewritePattern<vector::GatherOp> {
/// `tensor.extract`s. To avoid out-of-bounds memory accesses, these
/// loads/extracts are made conditional using `scf.if` ops.
struct Gather1DToConditionalLoads : OpRewritePattern<vector::GatherOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::GatherOp op,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp
index 9d6a865..479fc0c 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp
@@ -163,7 +163,7 @@ private:
/// : vector<7xi16>, vector<7xi16>
/// ```
struct InterleaveToShuffle final : OpRewritePattern<vector::InterleaveOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::InterleaveOp op,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp
index 5617b06..7730c4e 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp
@@ -48,7 +48,7 @@ namespace {
/// until a one-dimensional vector is reached.
class CreateMaskOpLowering : public OpRewritePattern<vector::CreateMaskOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::CreateMaskOp op,
PatternRewriter &rewriter) const override {
@@ -100,7 +100,7 @@ public:
/// will be folded at LLVM IR level.
class ConstantMaskOpLowering : public OpRewritePattern<vector::ConstantMaskOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ConstantMaskOp op,
PatternRewriter &rewriter) const override {
@@ -184,7 +184,7 @@ namespace {
/// and actually match the traits of its the nested `MaskableOpInterface`.
template <class SourceOp>
struct MaskOpRewritePattern : OpRewritePattern<MaskOp> {
- using OpRewritePattern<MaskOp>::OpRewritePattern;
+ using Base::Base;
private:
LogicalResult matchAndRewrite(MaskOp maskOp,
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp
index 4773732d..e86e2a9 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp
@@ -39,7 +39,7 @@ namespace {
class InnerOuterDimReductionConversion
: public OpRewritePattern<vector::MultiDimReductionOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
explicit InnerOuterDimReductionConversion(
MLIRContext *context, vector::VectorMultiReductionLowering options,
@@ -136,7 +136,7 @@ private:
class ReduceMultiDimReductionRank
: public OpRewritePattern<vector::MultiDimReductionOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
explicit ReduceMultiDimReductionRank(
MLIRContext *context, vector::VectorMultiReductionLowering options,
@@ -304,7 +304,7 @@ private:
/// and combines results
struct TwoDimMultiReductionToElementWise
: public OpRewritePattern<vector::MultiDimReductionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::MultiDimReductionOp multiReductionOp,
PatternRewriter &rewriter) const override {
@@ -359,7 +359,7 @@ struct TwoDimMultiReductionToElementWise
/// a sequence of vector.reduction ops.
struct TwoDimMultiReductionToReduction
: public OpRewritePattern<vector::MultiDimReductionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::MultiDimReductionOp multiReductionOp,
PatternRewriter &rewriter) const override {
@@ -420,7 +420,7 @@ struct TwoDimMultiReductionToReduction
/// separately.
struct OneDimMultiReductionToTwoDim
: public OpRewritePattern<vector::MultiDimReductionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::MultiDimReductionOp multiReductionOp,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp
index af4851e..258f2cb 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp
@@ -99,7 +99,7 @@ namespace {
/// return %7, %8 : vector<2x3xi32>, vector<2xi32>
/// ```
struct ScanToArithOps : public OpRewritePattern<vector::ScanOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ScanOp scanOp,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
index 603ea41..c5f22b2 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
@@ -189,7 +189,7 @@ class ShapeCastOpRewritePattern : public OpRewritePattern<vector::ShapeCastOp> {
}
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ShapeCastOp op,
PatternRewriter &rewriter) const override {
@@ -356,7 +356,7 @@ public:
class ScalableShapeCastOpRewritePattern
: public OpRewritePattern<vector::ShapeCastOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ShapeCastOp op,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp
index 78102f7..8f46ad6 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp
@@ -44,7 +44,7 @@ namespace {
///
struct MixedSizeInputShuffleOpRewrite final
: OpRewritePattern<vector::ShuffleOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ShuffleOp shuffleOp,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp
index ee5568a..08e7c89 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp
@@ -24,7 +24,7 @@ using namespace mlir::vector;
namespace {
struct StepToArithConstantOpRewrite final : OpRewritePattern<vector::StepOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::StepOp stepOp,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp
index 6407a86..7521e24 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp
@@ -667,7 +667,7 @@ getToElementsDefiningOps(FromElementsOp fromElemsOp,
struct ToFromElementsToShuffleTreeRewrite final
: OpRewritePattern<vector::FromElementsOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::FromElementsOp fromElemsOp,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp
index 9e7d0ce..c3f7de0 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp
@@ -300,7 +300,7 @@ namespace {
/// %x = vector.insert .., .. [.., ..]
class TransposeOpLowering : public OpRewritePattern<vector::TransposeOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
TransposeOpLowering(vector::VectorTransposeLowering vectorTransposeLowering,
MLIRContext *context, PatternBenefit benefit = 1)
@@ -395,7 +395,7 @@ private:
class Transpose2DWithUnitDimToShapeCast
: public OpRewritePattern<vector::TransposeOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
Transpose2DWithUnitDimToShapeCast(MLIRContext *context,
PatternBenefit benefit = 1)
@@ -433,7 +433,7 @@ public:
class TransposeOp2DToShuffleLowering
: public OpRewritePattern<vector::TransposeOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
TransposeOp2DToShuffleLowering(
vector::VectorTransposeLowering vectorTransposeLowering,
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
index cab1289..963b2c8 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
@@ -54,7 +54,7 @@ namespace {
// input by inserting vector.broadcast.
struct CastAwayExtractStridedSliceLeadingOneDim
: public OpRewritePattern<vector::ExtractStridedSliceOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp,
PatternRewriter &rewriter) const override {
@@ -104,7 +104,7 @@ struct CastAwayExtractStridedSliceLeadingOneDim
// inputs by inserting vector.broadcast.
struct CastAwayInsertStridedSliceLeadingOneDim
: public OpRewritePattern<vector::InsertStridedSliceOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::InsertStridedSliceOp insertOp,
PatternRewriter &rewriter) const override {
@@ -145,7 +145,7 @@ struct CastAwayInsertStridedSliceLeadingOneDim
// Casts away leading one dimensions in vector.insert's vector inputs by
// inserting vector.broadcast.
struct CastAwayInsertLeadingOneDim : public OpRewritePattern<vector::InsertOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::InsertOp insertOp,
PatternRewriter &rewriter) const override {
@@ -221,7 +221,7 @@ static Value dropUnitDimsFromMask(OpBuilder &b, Location loc, Value mask,
// 1 dimensions.
struct CastAwayTransferReadLeadingOneDim
: public OpRewritePattern<vector::TransferReadOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::TransferReadOp read,
PatternRewriter &rewriter) const override {
@@ -275,7 +275,7 @@ struct CastAwayTransferReadLeadingOneDim
// 1 dimensions.
struct CastAwayTransferWriteLeadingOneDim
: public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::TransferWriteOp write,
PatternRewriter &rewriter) const override {
@@ -541,7 +541,7 @@ public:
// vector.broadcast back to the original shape.
struct CastAwayConstantMaskLeadingOneDim
: public OpRewritePattern<vector::ConstantMaskOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ConstantMaskOp mask,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp
index bdbb792..7acc120 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp
@@ -48,7 +48,7 @@ namespace {
///
struct VectorMaskedLoadOpConverter final
: OpRewritePattern<vector::MaskedLoadOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::MaskedLoadOp maskedLoadOp,
PatternRewriter &rewriter) const override {
@@ -117,7 +117,7 @@ struct VectorMaskedLoadOpConverter final
///
struct VectorMaskedStoreOpConverter final
: OpRewritePattern<vector::MaskedStoreOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::MaskedStoreOp maskedStoreOp,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 264cbc1..3a6684f 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -548,7 +548,7 @@ namespace {
// NOTE: By default, all RMW sequences are atomic. Set `disableAtomicRMW` to
// `false` to generate non-atomic RMW sequences.
struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
ConvertVectorStore(MLIRContext *context, bool disableAtomicRMW)
: OpConversionPattern<vector::StoreOp>(context),
@@ -827,7 +827,7 @@ private:
/// adjusted mask .
struct ConvertVectorMaskedStore final
: OpConversionPattern<vector::MaskedStoreOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LogicalResult
matchAndRewrite(vector::MaskedStoreOp op, OpAdaptor adaptor,
@@ -950,7 +950,7 @@ struct ConvertVectorMaskedStore final
/// those cases, loads are converted to byte-aligned, byte-sized loads and the
/// target vector is extracted from the loaded vector.
struct ConvertVectorLoad final : OpConversionPattern<vector::LoadOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LogicalResult
matchAndRewrite(vector::LoadOp op, OpAdaptor adaptor,
@@ -1059,7 +1059,7 @@ struct ConvertVectorLoad final : OpConversionPattern<vector::LoadOp> {
/// bitcasting, since each `i8` container element holds two `i4` values.
struct ConvertVectorMaskedLoad final
: OpConversionPattern<vector::MaskedLoadOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LogicalResult
matchAndRewrite(vector::MaskedLoadOp op, OpAdaptor adaptor,
@@ -1257,7 +1257,7 @@ static bool fitsInMultiByteContainerTy(VectorType subByteVecTy,
// TODO: Document-me
struct ConvertVectorTransferRead final
: OpConversionPattern<vector::TransferReadOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LogicalResult
matchAndRewrite(vector::TransferReadOp op, OpAdaptor adaptor,
@@ -1942,7 +1942,7 @@ namespace {
/// advantage of high-level information to avoid leaving LLVM to scramble with
/// peephole optimizations.
struct RewriteBitCastOfTruncI : OpRewritePattern<vector::BitCastOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::BitCastOp bitCastOp,
PatternRewriter &rewriter) const override {
@@ -2147,7 +2147,7 @@ struct RewriteAlignedSubByteIntExt : OpRewritePattern<ConversionOpType> {
/// %5 = vector.bitcast %4 : vector<4xi8> to vector<8xi4>
///
struct RewriteAlignedSubByteIntTrunc : OpRewritePattern<arith::TruncIOp> {
- using OpRewritePattern<arith::TruncIOp>::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(arith::TruncIOp truncOp,
PatternRewriter &rewriter) const override {
@@ -2200,7 +2200,7 @@ struct RewriteAlignedSubByteIntTrunc : OpRewritePattern<arith::TruncIOp> {
/// %2 = arith.trunci %1 : vector<16x8xi8> to vector<16x8xi4>
///
struct RewriteVectorTranspose : OpRewritePattern<vector::TransposeOp> {
- using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;
+ using Base::Base;
RewriteVectorTranspose(MLIRContext *context, PatternBenefit benefit)
: OpRewritePattern<vector::TransposeOp>(context, benefit) {}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp
index f6d6555..9e49873 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp
@@ -34,7 +34,7 @@ using namespace mlir::vector;
class DecomposeDifferentRankInsertStridedSlice
: public OpRewritePattern<InsertStridedSliceOp> {
public:
- using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(InsertStridedSliceOp op,
PatternRewriter &rewriter) const override {
@@ -84,7 +84,7 @@ public:
class ConvertSameRankInsertStridedSliceIntoShuffle
: public OpRewritePattern<InsertStridedSliceOp> {
public:
- using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;
+ using Base::Base;
void initialize() {
// This pattern creates recursive InsertStridedSliceOp, but the recursion is
@@ -183,7 +183,7 @@ public:
class Convert1DExtractStridedSliceIntoShuffle
: public OpRewritePattern<ExtractStridedSliceOp> {
public:
- using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
PatternRewriter &rewriter) const override {
@@ -271,7 +271,7 @@ private:
class DecomposeNDExtractStridedSlice
: public OpRewritePattern<ExtractStridedSliceOp> {
public:
- using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;
+ using Base::Base;
void initialize() {
// This pattern creates recursive ExtractStridedSliceOp, but the recursion
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
index 82bac8c..71fba71c 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
@@ -214,7 +214,7 @@ SmallVector<int64_t> static getStridedSliceInsertionIndices(
/// vector.extract_strided_slice operation.
struct LinearizeVectorExtractStridedSlice final
: public mlir::OpConversionPattern<mlir::vector::ExtractStridedSliceOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorExtractStridedSlice(const TypeConverter &typeConverter,
MLIRContext *context,
PatternBenefit benefit = 1)
@@ -285,7 +285,7 @@ struct LinearizeVectorExtractStridedSlice final
///
struct LinearizeVectorInsertStridedSlice final
: public mlir::OpConversionPattern<mlir::vector::InsertStridedSliceOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorInsertStridedSlice(const TypeConverter &typeConverter,
MLIRContext *context,
PatternBenefit benefit = 1)
@@ -348,7 +348,7 @@ struct LinearizeVectorInsertStridedSlice final
/// of the original shuffle operation.
struct LinearizeVectorShuffle final
: public OpConversionPattern<vector::ShuffleOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorShuffle(const TypeConverter &typeConverter,
MLIRContext *context, PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}
@@ -423,7 +423,7 @@ struct LinearizeVectorShuffle final
///
struct LinearizeVectorExtract final
: public OpConversionPattern<vector::ExtractOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorExtract(const TypeConverter &typeConverter,
MLIRContext *context, PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}
@@ -501,7 +501,7 @@ struct LinearizeVectorExtract final
///
struct LinearizeVectorInsert final
: public OpConversionPattern<vector::InsertOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorInsert(const TypeConverter &typeConverter,
MLIRContext *context, PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}
@@ -575,7 +575,7 @@ struct LinearizeVectorInsert final
/// %out_nd = vector.shape_cast %out_1d: vector<16xf16> to vector<4x4xf16>
struct LinearizeVectorBitCast final
: public OpConversionPattern<vector::BitCastOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorBitCast(const TypeConverter &typeConverter,
MLIRContext *context, PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}
@@ -598,7 +598,7 @@ struct LinearizeVectorBitCast final
/// %out_nd = vector.shape_cast %out_1d : vector<16xf32> to vector<4x4xf32>
struct LinearizeVectorSplat final
: public OpConversionPattern<vector::SplatOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorSplat(const TypeConverter &typeConverter, MLIRContext *context,
PatternBenefit benefit = 1)
@@ -629,7 +629,7 @@ struct LinearizeVectorSplat final
/// %shape_cast = vector.shape_cast %mask : vector<4xi1> to vector<1x4xi1>
struct LinearizeVectorCreateMask final
: OpConversionPattern<vector::CreateMaskOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorCreateMask(const TypeConverter &typeConverter,
MLIRContext *context, PatternBenefit benefit = 1)
@@ -684,7 +684,7 @@ struct LinearizeVectorCreateMask final
/// For generic cases, the vector unroll pass should be used to unroll the load
/// to vector<1x1x...xN> form, which can then be linearized.
struct LinearizeVectorLoad final : public OpConversionPattern<vector::LoadOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorLoad(const TypeConverter &typeConverter, MLIRContext *context,
PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}
@@ -731,7 +731,7 @@ struct LinearizeVectorLoad final : public OpConversionPattern<vector::LoadOp> {
/// to vector<1x1x...xN> form and then linearized
struct LinearizeVectorStore final
: public OpConversionPattern<vector::StoreOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorStore(const TypeConverter &typeConverter, MLIRContext *context,
PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}
@@ -778,7 +778,7 @@ struct LinearizeVectorStore final
///
struct LinearizeVectorFromElements final
: public OpConversionPattern<vector::FromElementsOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorFromElements(const TypeConverter &typeConverter,
MLIRContext *context, PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}
@@ -814,7 +814,7 @@ struct LinearizeVectorFromElements final
///
struct LinearizeVectorToElements final
: public OpConversionPattern<vector::ToElementsOp> {
- using OpConversionPattern::OpConversionPattern;
+ using Base::Base;
LinearizeVectorToElements(const TypeConverter &typeConverter,
MLIRContext *context, PatternBenefit benefit = 1)
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
index c364a8b..1121d95 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
@@ -1081,7 +1081,7 @@ private:
/// Rewrite transfer_writes of vectors of size 1 (e.g., vector<1x1xf32>)
/// to memref.store.
class RewriteScalarWrite : public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::TransferWriteOp xferOp,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 866f789..d6a6d7cd 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -78,7 +78,7 @@ namespace {
/// ```
struct MultiReduceToContract
: public OpRewritePattern<vector::MultiDimReductionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::MultiDimReductionOp reduceOp,
PatternRewriter &rewriter) const override {
@@ -138,7 +138,7 @@ struct MultiReduceToContract
/// ```
struct CombineContractABTranspose final
: public OpRewritePattern<vector::ContractionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
PatternRewriter &rewriter) const override {
@@ -202,7 +202,7 @@ struct CombineContractABTranspose final
/// ```
struct CombineContractResultTranspose final
: public OpRewritePattern<vector::TransposeOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::TransposeOp resTOp,
PatternRewriter &rewriter) const override {
@@ -568,7 +568,7 @@ static SmallVector<int64_t> getIntValueVector(ArrayAttr arrayAttr) {
// %2 = vector.extract %1[1] : f16 from vector<2xf16>
struct BubbleDownVectorBitCastForExtract
: public OpRewritePattern<vector::ExtractOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ExtractOp extractOp,
PatternRewriter &rewriter) const override {
@@ -643,7 +643,7 @@ struct BubbleDownVectorBitCastForExtract
// %1 = vector.bitcast %0 : vector<2xf32> to vector<4xf16>
struct BubbleDownBitCastForStridedSliceExtract
: public OpRewritePattern<vector::ExtractStridedSliceOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp,
PatternRewriter &rewriter) const override {
@@ -721,7 +721,7 @@ struct BubbleDownBitCastForStridedSliceExtract
// %2 = vector.insert %0, %1 [4] : vector<16xi8> into vector<8x16xi8>
//
struct BubbleUpBitCastForInsert : public OpRewritePattern<vector::BitCastOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
PatternRewriter &rewriter) const override {
@@ -794,7 +794,7 @@ struct BubbleUpBitCastForInsert : public OpRewritePattern<vector::BitCastOp> {
// offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BubbleUpBitCastForStridedSliceInsert
: public OpRewritePattern<vector::BitCastOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
PatternRewriter &rewriter) const override {
@@ -892,7 +892,7 @@ struct BubbleUpBitCastForStridedSliceInsert
// %7 = vector.insert_strided_slice %6, %cst {
// offsets = [2], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BreakDownVectorBitCast : public OpRewritePattern<vector::BitCastOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
public:
BreakDownVectorBitCast(MLIRContext *context,
@@ -1131,7 +1131,7 @@ struct ReorderElementwiseOpsOnBroadcast final
class ExtractOpFromElementwise final
: public OpRewritePattern<vector::ExtractOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ExtractOp op,
PatternRewriter &rewriter) const override {
@@ -1206,7 +1206,7 @@ static bool isSupportedMemSinkElementType(Type type) {
/// ```
class ExtractOpFromLoad final : public OpRewritePattern<vector::ExtractOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ExtractOp op,
PatternRewriter &rewriter) const override {
@@ -1285,7 +1285,7 @@ public:
class StoreOpFromSplatOrBroadcast final
: public OpRewritePattern<vector::StoreOp> {
public:
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::StoreOp op,
PatternRewriter &rewriter) const override {
@@ -1476,7 +1476,7 @@ static bool allI1ConstantValuesSetTo(arith::ConstantOp constantOp, bool value) {
/// InstCombine seems to handle vectors with multiple elements but not the
/// single element ones.
struct FoldI1Select : public OpRewritePattern<arith::SelectOp> {
- using OpRewritePattern<arith::SelectOp>::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(arith::SelectOp selectOp,
PatternRewriter &rewriter) const override {
@@ -1560,7 +1560,7 @@ getTransferFoldableInnerUnitDims(MemRefType srcType, VectorType vectorType) {
/// Drop inner most contiguous unit dimensions from transfer_read operand.
class DropInnerMostUnitDimsTransferRead
: public OpRewritePattern<vector::TransferReadOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
PatternRewriter &rewriter) const override {
@@ -1651,7 +1651,7 @@ class DropInnerMostUnitDimsTransferRead
/// Note, this pattern will not collapse "scalable unit" dims (i.e. `[1]`).
class DropInnerMostUnitDimsTransferWrite
: public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp,
PatternRewriter &rewriter) const override {
@@ -1728,7 +1728,7 @@ class DropInnerMostUnitDimsTransferWrite
/// with the RHS transposed) lowering.
struct CanonicalizeContractMatmulToMMT final
: OpRewritePattern<vector::ContractionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
using FilterConstraintType =
std::function<LogicalResult(vector::ContractionOp op)>;
@@ -1845,7 +1845,7 @@ private:
template <typename ExtOp>
struct FoldArithExtIntoContractionOp
: public OpRewritePattern<vector::ContractionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
PatternRewriter &rewriter) const override {
@@ -1878,7 +1878,7 @@ struct FoldArithExtIntoContractionOp
/// %b = vector.reduction <add> %a, %acc
/// ```
struct ChainedReduction final : OpRewritePattern<vector::ReductionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ReductionOp op,
PatternRewriter &rewriter) const override {
@@ -2033,7 +2033,7 @@ struct DropUnitDimFromElementwiseOps final
/// ```
struct DropUnitDimsFromTransposeOp final
: OpRewritePattern<vector::TransposeOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::TransposeOp op,
PatternRewriter &rewriter) const override {
@@ -2110,7 +2110,7 @@ struct DropUnitDimsFromTransposeOp final
/// : vector<[4]x4xf32> to vector<[4]x1x1x4xf32>
/// ```
struct DropUnitDimsFromScfForOp final : OpRewritePattern<scf::ForOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(scf::ForOp forOp,
PatternRewriter &rewriter) const override {
@@ -2155,7 +2155,7 @@ struct DropUnitDimsFromScfForOp final : OpRewritePattern<scf::ForOp> {
/// %c = vector.reduction <add> %b, %acc
/// ```
struct ReduceRedundantZero final : OpRewritePattern<vector::ReductionOp> {
- using OpRewritePattern::OpRewritePattern;
+ using Base::Base;
LogicalResult matchAndRewrite(vector::ReductionOp op,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
index 9413a92..784e5d6 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
@@ -824,7 +824,7 @@ struct WgToSgStoreScatterOpWithOffset
return failure();
xegpu::DistributeLayoutAttr layout =
- xegpu::getDistributeLayoutAttr(op.getValue());
+ xegpu::getDistributeLayoutAttr(op.getOperand(0));
if (!layout || !layout.isForWorkgroup())
return failure();
@@ -844,12 +844,19 @@ struct WgToSgStoreScatterOpWithOffset
auto chunkSizeAttr = rewriter.getI64IntegerAttr(chunkSize);
for (auto [val, offs, mask] : llvm::zip(
adaptor.getValue(), adaptor.getOffsets(), adaptor.getMask())) {
- xegpu::StoreScatterOp::create(rewriter, loc, val, op.getDest(), offs,
- mask, chunkSizeAttr, op.getL1HintAttr(),
- op.getL2HintAttr(), op.getL3HintAttr());
+ auto store = xegpu::StoreScatterOp::create(
+ rewriter, loc, val, op.getDest(), offs, mask, chunkSizeAttr,
+ op.getL1HintAttr(), op.getL2HintAttr(), op.getL3HintAttr());
// Update the layout attribute to drop sg_layout and sg_data.
- if (auto newLayout = layout.dropSgLayoutAndData())
- op->setAttr("layout", newLayout);
+ if (!layout.getEffectiveLaneLayoutAsInt().empty() ||
+ !layout.getEffectiveInstDataAsInt().empty()) {
+ for (OpOperand &operand : store->getOpOperands()) {
+ // Skip operand 1 (the memref destination).
+ if (operand.getOperandNumber() == 1)
+ continue;
+ xegpu::setDistributeLayoutAttr(operand, layout.dropSgLayoutAndData());
+ }
+ }
}
rewriter.eraseOp(op);
return success();
@@ -1247,10 +1254,7 @@ void XeGPUWgToSgDistributePass::runOnOperation() {
target.addDynamicallyLegalOp<xegpu::StoreScatterOp>(
[=](xegpu::StoreScatterOp op) -> bool {
- // Check if the layout attribute is present on the result.
- auto layout = op->getAttrOfType<xegpu::LayoutAttr>("layout");
- if (!layout)
- return true;
+ auto layout = xegpu::getDistributeLayoutAttr(op.getOperand(0));
return isLegal(layout);
});
diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp
index c84e760..8f199b6 100644
--- a/mlir/lib/IR/Builders.cpp
+++ b/mlir/lib/IR/Builders.cpp
@@ -489,13 +489,6 @@ OpBuilder::tryFold(Operation *op, SmallVectorImpl<Value> &results,
SmallVector<OpFoldResult, 4> foldResults;
LDBG() << "Trying to fold: "
<< OpWithFlags(op, OpPrintingFlags().skipRegions());
- if (op->getName().getStringRef() == "vector.extract") {
- Operation *parent = op->getParentOp();
- while (parent && parent->getName().getStringRef() != "spirv.func")
- parent = parent->getParentOp();
- if (parent)
- parent->dump();
- }
if (failed(op->fold(foldResults)))
return cleanupFailure();
diff --git a/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp b/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp
index af4ea5a..0f28cbc 100644
--- a/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp
+++ b/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp
@@ -304,7 +304,7 @@ static ConstantIntRanges inferDivURange(const ConstantIntRanges &lhs,
umin = lhsMin.udiv(rhsMax);
// X u/ Y u<= X.
- APInt umax = lhsMax;
+ const APInt &umax = lhsMax;
return ConstantIntRanges::fromUnsigned(umin, umax);
}
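The bound follows from unsigned arithmetic: a quotient never exceeds its dividend, so `lhsMax` itself is a valid upper bound and can be bound by const reference instead of copied. An illustration with hypothetical values:

  // X u/ Y <= X for any unsigned X and Y != 0, e.g. 13u / 4u == 3u <= 13u.
  APInt x(/*numBits=*/32, 13), y(/*numBits=*/32, 4);
  assert(x.udiv(y).ule(x)); // reusing lhsMax as umax is therefore sound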
diff --git a/mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp b/mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp
index d6b8a8a..e3f075f 100644
--- a/mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp
+++ b/mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp
@@ -54,6 +54,7 @@ struct OpStrings {
std::string opCppName;
SmallVector<std::string> opResultNames;
SmallVector<std::string> opOperandNames;
+ SmallVector<std::string> opRegionNames;
};
static std::string joinNameList(llvm::ArrayRef<std::string> names) {
@@ -87,8 +88,8 @@ static TypeStrings getStrings(irdl::TypeOp type) {
/// Generates OpStrings from an OperationOp
static OpStrings getStrings(irdl::OperationOp op) {
auto operandOp = op.getOp<irdl::OperandsOp>();
-
auto resultOp = op.getOp<irdl::ResultsOp>();
+ auto regionsOp = op.getOp<irdl::RegionsOp>();
OpStrings strings;
strings.opName = op.getSymName();
@@ -108,6 +109,13 @@ static OpStrings getStrings(irdl::OperationOp op) {
}));
}
+ if (regionsOp) {
+ strings.opRegionNames = SmallVector<std::string>(
+ llvm::map_range(regionsOp->getNames(), [](Attribute attr) {
+ return llvm::formatv("{0}", cast<StringAttr>(attr));
+ }));
+ }
+
return strings;
}
@@ -122,6 +130,7 @@ static void fillDict(irdl::detail::dictionary &dict,
static void fillDict(irdl::detail::dictionary &dict, const OpStrings &strings) {
const auto operandCount = strings.opOperandNames.size();
const auto resultCount = strings.opResultNames.size();
+ const auto regionCount = strings.opRegionNames.size();
dict["OP_NAME"] = strings.opName;
dict["OP_CPP_NAME"] = strings.opCppName;
@@ -131,6 +140,7 @@ static void fillDict(irdl::detail::dictionary &dict, const OpStrings &strings) {
operandCount ? joinNameList(strings.opOperandNames) : "{\"\"}";
dict["OP_RESULT_INITIALIZER_LIST"] =
resultCount ? joinNameList(strings.opResultNames) : "{\"\"}";
+ dict["OP_REGION_COUNT"] = std::to_string(regionCount);
}
/// Fills a dictionary with values from DialectStrings
@@ -179,6 +189,8 @@ static void generateOpGetterDeclarations(irdl::detail::dictionary &dict,
const OpStrings &opStrings) {
auto opGetters = std::string{};
auto resGetters = std::string{};
+ auto regionGetters = std::string{};
+ auto regionAdaptorGetters = std::string{};
for (size_t i = 0, end = opStrings.opOperandNames.size(); i < end; ++i) {
const auto op =
@@ -196,8 +208,23 @@ static void generateOpGetterDeclarations(irdl::detail::dictionary &dict,
op, i);
}
+ for (size_t i = 0, end = opStrings.opRegionNames.size(); i < end; ++i) {
+ const auto op =
+ llvm::convertToCamelFromSnakeCase(opStrings.opRegionNames[i], true);
+ regionAdaptorGetters += llvm::formatv(
+ R"(::mlir::Region &get{0}() { return *getRegions()[{1}]; }
+ )",
+ op, i);
+ regionGetters += llvm::formatv(
+ R"(::mlir::Region &get{0}() { return (*this)->getRegion({1}); }
+ )",
+ op, i);
+ }
+
dict["OP_OPERAND_GETTER_DECLS"] = opGetters;
dict["OP_RESULT_GETTER_DECLS"] = resGetters;
+ dict["OP_REGION_ADAPTER_GETTER_DECLS"] = regionAdaptorGetters;
+ dict["OP_REGION_GETTER_DECLS"] = regionGetters;
}
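For illustration, given a region declared under the hypothetical IRDL name `then_region` at index 0, `convertToCamelFromSnakeCase` yields `ThenRegion` and the two formatv templates above expand to:

  // Hypothetical expansion; actual names come from the irdl.regions declaration.
  ::mlir::Region &getThenRegion() { return *getRegions()[0]; }      // adaptor getter
  ::mlir::Region &getThenRegion() { return (*this)->getRegion(0); } // op getter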
static void generateOpBuilderDeclarations(irdl::detail::dictionary &dict,
@@ -238,6 +265,22 @@ static void generateOpBuilderDeclarations(irdl::detail::dictionary &dict,
dict["OP_BUILD_DECLS"] = buildDecls;
}
+// Compute the C++ traits the op needs; returns the list of trait names to
+// attach to the op class (empty if none).
+static SmallVector<std::string> generateTraits(irdl::OperationOp op,
+ const OpStrings &strings) {
+ SmallVector<std::string> cppTraitNames;
+ if (!strings.opRegionNames.empty()) {
+ cppTraitNames.push_back(
+ llvm::formatv("::mlir::OpTrait::NRegions<{0}>::Impl",
+ strings.opRegionNames.size())
+ .str());
+
+ // Requires verifyInvariantsImpl to be implemented on the op.
+ cppTraitNames.emplace_back("::mlir::OpTrait::OpInvariants");
+ }
+ return cppTraitNames;
+}
+
static LogicalResult generateOperationInclude(irdl::OperationOp op,
raw_ostream &output,
irdl::detail::dictionary &dict) {
@@ -247,6 +290,13 @@ static LogicalResult generateOperationInclude(irdl::OperationOp op,
const auto opStrings = getStrings(op);
fillDict(dict, opStrings);
+ SmallVector<std::string> traitNames = generateTraits(op, opStrings);
+ if (traitNames.empty())
+ dict["OP_TEMPLATE_ARGS"] = opStrings.opCppName;
+ else
+ dict["OP_TEMPLATE_ARGS"] = llvm::formatv("{0}, {1}", opStrings.opCppName,
+ llvm::join(traitNames, ", "));
+
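With at least one region declared, `OP_TEMPLATE_ARGS` carries the op name plus the two traits, so the class declaration in PerOperationDecl.txt (see the template change below) instantiates along these lines for a hypothetical op `MyOp` with one region:

  // Hypothetical expansion of ::mlir::Op<__OP_TEMPLATE_ARGS__>:
  class MyOp : public ::mlir::Op<MyOp,
                                 ::mlir::OpTrait::NRegions<1>::Impl,
                                 ::mlir::OpTrait::OpInvariants> {
    // ...
  };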
generateOpGetterDeclarations(dict, opStrings);
generateOpBuilderDeclarations(dict, opStrings);
@@ -301,6 +351,110 @@ static LogicalResult generateInclude(irdl::DialectOp dialect,
return success();
}
+static void generateRegionConstraintVerifiers(
+ irdl::detail::dictionary &dict, irdl::OperationOp op,
+ const OpStrings &strings, SmallVectorImpl<std::string> &verifierHelpers,
+ SmallVectorImpl<std::string> &verifierCalls) {
+ auto regionsOp = op.getOp<irdl::RegionsOp>();
+ if (strings.opRegionNames.empty() || !regionsOp)
+ return;
+
+ for (size_t i = 0; i < strings.opRegionNames.size(); ++i) {
+ std::string regionName = strings.opRegionNames[i];
+ std::string helperFnName =
+ llvm::formatv("__mlir_irdl_local_region_constraint_{0}_{1}",
+ strings.opCppName, regionName)
+ .str();
+
+ // Extract the actual region constraint from the IRDL RegionOp
+ std::string condition = "true";
+ std::string textualConditionName = "any region";
+
+ if (auto regionDefOp =
+ dyn_cast<irdl::RegionOp>(regionsOp->getArgs()[i].getDefiningOp())) {
+ // Generate constraint condition based on RegionOp attributes
+ SmallVector<std::string> conditionParts;
+ SmallVector<std::string> descriptionParts;
+
+ // Check number of blocks constraint
+ if (auto blockCount = regionDefOp.getNumberOfBlocks()) {
+ conditionParts.push_back(
+ llvm::formatv("region.getBlocks().size() == {0}",
+ blockCount.value())
+ .str());
+ descriptionParts.push_back(
+ llvm::formatv("exactly {0} block(s)", blockCount.value()).str());
+ }
+
+ // Check entry block arguments constraint
+ if (regionDefOp.getConstrainedArguments()) {
+ size_t expectedArgCount = regionDefOp.getEntryBlockArgs().size();
+ conditionParts.push_back(
+ llvm::formatv("region.getNumArguments() == {0}", expectedArgCount)
+ .str());
+ descriptionParts.push_back(
+ llvm::formatv("{0} entry block argument(s)", expectedArgCount)
+ .str());
+ }
+
+ // Combine conditions
+ if (!conditionParts.empty()) {
+ condition = llvm::join(conditionParts, " && ");
+ }
+
+ // Generate descriptive error message
+ if (!descriptionParts.empty()) {
+ textualConditionName =
+ llvm::formatv("region with {0}",
+ llvm::join(descriptionParts, " and "))
+ .str();
+ }
+ }
+
+ verifierHelpers.push_back(llvm::formatv(
+ R"(static ::llvm::LogicalResult {0}(::mlir::Operation *op, ::mlir::Region &region, ::llvm::StringRef regionName, unsigned regionIndex) {{
+ if (!({1})) {{
+ return op->emitOpError("region #") << regionIndex
+ << (regionName.empty() ? " " : " ('" + regionName + "') ")
+ << "failed to verify constraint: {2}";
+ }
+ return ::mlir::success();
+})",
+ helperFnName, condition, textualConditionName));
+
+ verifierCalls.push_back(llvm::formatv(R"(
+ if (::mlir::failed({0}(*this, (*this)->getRegion({1}), "{2}", {1})))
+ return ::mlir::failure();)",
+ helperFnName, i, regionName)
+ .str());
+ }
+}
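For a hypothetical op `MyOp` whose region `body` is constrained to exactly one block, the helper template above renders roughly as:

  static ::llvm::LogicalResult __mlir_irdl_local_region_constraint_MyOp_body(
      ::mlir::Operation *op, ::mlir::Region &region,
      ::llvm::StringRef regionName, unsigned regionIndex) {
    if (!(region.getBlocks().size() == 1)) {
      return op->emitOpError("region #")
             << regionIndex
             << (regionName.empty() ? " " : " ('" + regionName + "') ")
             << "failed to verify constraint: region with exactly 1 block(s)";
    }
    return ::mlir::success();
  }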
+
+static void generateVerifiers(irdl::detail::dictionary &dict,
+ irdl::OperationOp op, const OpStrings &strings) {
+ SmallVector<std::string> verifierHelpers;
+ SmallVector<std::string> verifierCalls;
+
+ generateRegionConstraintVerifiers(dict, op, strings, verifierHelpers,
+ verifierCalls);
+
+ // Add an overall verifier that sequences the helper calls
+ std::string verifierDef =
+ llvm::formatv(R"(
+::llvm::LogicalResult {0}::verifyInvariantsImpl() {{
+ if (::mlir::failed(verify()))
+ return ::mlir::failure();
+
+ {1}
+
+ return ::mlir::success();
+})",
+ strings.opCppName, llvm::join(verifierCalls, "\n"));
+
+ dict["OP_VERIFIER_HELPERS"] = llvm::join(verifierHelpers, "\n");
+ dict["OP_VERIFIER"] = verifierDef;
+}
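The rendered verifier then sequences the helper calls behind the user-facing `verify()`; for the same hypothetical op it reads:

  ::llvm::LogicalResult MyOp::verifyInvariantsImpl() {
    if (::mlir::failed(verify()))
      return ::mlir::failure();

    if (::mlir::failed(__mlir_irdl_local_region_constraint_MyOp_body(
            *this, (*this)->getRegion(0), "body", 0)))
      return ::mlir::failure();

    return ::mlir::success();
  }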
+
static std::string generateOpDefinition(irdl::detail::dictionary &dict,
irdl::OperationOp op) {
static const auto perOpDefTemplate = mlir::irdl::detail::Template{
@@ -370,6 +524,8 @@ void {0}::build(::mlir::OpBuilder &opBuilder, ::mlir::OperationState &opState, {
dict["OP_BUILD_DEFS"] = buildDefinition;
+ generateVerifiers(dict, op, opStrings);
+
std::string str;
llvm::raw_string_ostream stream{str};
perOpDefTemplate.render(stream, dict);
@@ -427,7 +583,7 @@ static LogicalResult generateLib(irdl::DialectOp dialect, raw_ostream &output,
dict["TYPE_PARSER"] = llvm::formatv(
R"(static ::mlir::OptionalParseResult generatedTypeParser(::mlir::AsmParser &parser, ::llvm::StringRef *mnemonic, ::mlir::Type &value) {
return ::mlir::AsmParser::KeywordSwitch<::mlir::OptionalParseResult>(parser)
- {0}
+ {0}
.Default([&](llvm::StringRef keyword, llvm::SMLoc) {{
*mnemonic = keyword;
return std::nullopt;
@@ -520,6 +676,8 @@ static LogicalResult verifySupported(irdl::DialectOp dialect) {
"IRDL C++ translation does not yet support variadic results");
}))
.Case<irdl::AnyOp>(([](irdl::AnyOp) { return success(); }))
+ .Case<irdl::RegionOp>(([](irdl::RegionOp) { return success(); }))
+ .Case<irdl::RegionsOp>(([](irdl::RegionsOp) { return success(); }))
.Default([](mlir::Operation *op) -> LogicalResult {
return op->emitError("IRDL C++ translation does not yet support "
"translation of ")
diff --git a/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt b/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt
index e9068e9..93ce0be 100644
--- a/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt
+++ b/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt
@@ -12,15 +12,15 @@ public:
struct Properties {
};
public:
- __OP_CPP_NAME__GenericAdaptorBase(::mlir::Operation *op)
- : odsAttrs(op->getRawDictionaryAttrs()), odsOpName(op->getName()),
- odsRegions(op->getRegions())
+ __OP_CPP_NAME__GenericAdaptorBase(::mlir::Operation *op)
+ : odsAttrs(op->getRawDictionaryAttrs()), odsOpName(op->getName()),
+ odsRegions(op->getRegions())
{}
/// Return the unstructured operand index of a structured operand along with
/// the number of unstructured operands it contains.
std::pair<unsigned, unsigned>
- getStructuredOperandIndexAndLength (unsigned index,
+ getStructuredOperandIndexAndLength (unsigned index,
unsigned odsOperandsSize) {
return {index, 1};
}
@@ -32,6 +32,12 @@ public:
::mlir::DictionaryAttr getAttributes() {
return odsAttrs;
}
+
+ __OP_REGION_ADAPTER_GETTER_DECLS__
+
+ ::mlir::RegionRange getRegions() {
+ return odsRegions;
+ }
protected:
::mlir::DictionaryAttr odsAttrs;
::std::optional<::mlir::OperationName> odsOpName;
@@ -42,28 +48,28 @@ protected:
} // namespace detail
template <typename RangeT>
-class __OP_CPP_NAME__GenericAdaptor
+class __OP_CPP_NAME__GenericAdaptor
: public detail::__OP_CPP_NAME__GenericAdaptorBase {
using ValueT = ::llvm::detail::ValueOfRange<RangeT>;
using Base = detail::__OP_CPP_NAME__GenericAdaptorBase;
public:
__OP_CPP_NAME__GenericAdaptor(RangeT values, ::mlir::DictionaryAttr attrs,
- ::mlir::OpaqueProperties properties,
- ::mlir::RegionRange regions = {})
- : __OP_CPP_NAME__GenericAdaptor(values, attrs,
- (properties ? *properties.as<::mlir::EmptyProperties *>()
+ ::mlir::OpaqueProperties properties,
+ ::mlir::RegionRange regions = {})
+ : __OP_CPP_NAME__GenericAdaptor(values, attrs,
+ (properties ? *properties.as<::mlir::EmptyProperties *>()
: ::mlir::EmptyProperties{}), regions) {}
- __OP_CPP_NAME__GenericAdaptor(RangeT values,
+ __OP_CPP_NAME__GenericAdaptor(RangeT values,
const __OP_CPP_NAME__GenericAdaptorBase &base)
: Base(base), odsOperands(values) {}
- // This template parameter allows using __OP_CPP_NAME__ which is declared
+ // This template parameter allows using __OP_CPP_NAME__ which is declared
// later.
template <typename LateInst = __OP_CPP_NAME__,
typename = std::enable_if_t<
std::is_same_v<LateInst, __OP_CPP_NAME__>>>
- __OP_CPP_NAME__GenericAdaptor(RangeT values, LateInst op)
+ __OP_CPP_NAME__GenericAdaptor(RangeT values, LateInst op)
: Base(op), odsOperands(values) {}
/// Return the unstructured operand index of a structured operand along with
@@ -77,7 +83,7 @@ public:
RangeT getStructuredOperands(unsigned index) {
auto valueRange = getStructuredOperandIndexAndLength(index);
return {std::next(odsOperands.begin(), valueRange.first),
- std::next(odsOperands.begin(),
+ std::next(odsOperands.begin(),
valueRange.first + valueRange.second)};
}
@@ -91,7 +97,7 @@ private:
RangeT odsOperands;
};
-class __OP_CPP_NAME__Adaptor
+class __OP_CPP_NAME__Adaptor
: public __OP_CPP_NAME__GenericAdaptor<::mlir::ValueRange> {
public:
using __OP_CPP_NAME__GenericAdaptor::__OP_CPP_NAME__GenericAdaptor;
@@ -100,7 +106,7 @@ public:
::llvm::LogicalResult verify(::mlir::Location loc);
};
-class __OP_CPP_NAME__ : public ::mlir::Op<__OP_CPP_NAME__> {
+class __OP_CPP_NAME__ : public ::mlir::Op<__OP_TEMPLATE_ARGS__> {
public:
using Op::Op;
using Op::print;
@@ -112,6 +118,8 @@ public:
return {};
}
+ ::llvm::LogicalResult verifyInvariantsImpl();
+
static constexpr ::llvm::StringLiteral getOperationName() {
return ::llvm::StringLiteral("__DIALECT_NAME__.__OP_NAME__");
}
@@ -147,7 +155,7 @@ public:
::mlir::Operation::operand_range getStructuredOperands(unsigned index) {
auto valueRange = getStructuredOperandIndexAndLength(index);
return {std::next(getOperation()->operand_begin(), valueRange.first),
- std::next(getOperation()->operand_begin(),
+ std::next(getOperation()->operand_begin(),
valueRange.first + valueRange.second)};
}
@@ -162,18 +170,19 @@ public:
::mlir::Operation::result_range getStructuredResults(unsigned index) {
auto valueRange = getStructuredResultIndexAndLength(index);
return {std::next(getOperation()->result_begin(), valueRange.first),
- std::next(getOperation()->result_begin(),
+ std::next(getOperation()->result_begin(),
valueRange.first + valueRange.second)};
}
__OP_OPERAND_GETTER_DECLS__
__OP_RESULT_GETTER_DECLS__
-
+ __OP_REGION_GETTER_DECLS__
+
__OP_BUILD_DECLS__
- static void build(::mlir::OpBuilder &odsBuilder,
- ::mlir::OperationState &odsState,
- ::mlir::TypeRange resultTypes,
- ::mlir::ValueRange operands,
+ static void build(::mlir::OpBuilder &odsBuilder,
+ ::mlir::OperationState &odsState,
+ ::mlir::TypeRange resultTypes,
+ ::mlir::ValueRange operands,
::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
static __OP_CPP_NAME__ create(::mlir::OpBuilder &odsBuilder,
diff --git a/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt b/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt
index 30ca420..f4a1b7a 100644
--- a/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt
+++ b/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt
@@ -6,12 +6,14 @@ R"(
__NAMESPACE_OPEN__
+__OP_VERIFIER_HELPERS__
+
__OP_BUILD_DEFS__
-void __OP_CPP_NAME__::build(::mlir::OpBuilder &odsBuilder,
- ::mlir::OperationState &odsState,
- ::mlir::TypeRange resultTypes,
- ::mlir::ValueRange operands,
+void __OP_CPP_NAME__::build(::mlir::OpBuilder &odsBuilder,
+ ::mlir::OperationState &odsState,
+ ::mlir::TypeRange resultTypes,
+ ::mlir::ValueRange operands,
::llvm::ArrayRef<::mlir::NamedAttribute> attributes)
{
assert(operands.size() == __OP_OPERAND_COUNT__);
@@ -19,6 +21,9 @@ void __OP_CPP_NAME__::build(::mlir::OpBuilder &odsBuilder,
odsState.addOperands(operands);
odsState.addAttributes(attributes);
odsState.addTypes(resultTypes);
+ for (unsigned i = 0; i != __OP_REGION_COUNT__; ++i) {
+ (void)odsState.addRegion();
+ }
}
__OP_CPP_NAME__
@@ -44,6 +49,7 @@ __OP_CPP_NAME__::create(::mlir::ImplicitLocOpBuilder &odsBuilder,
return create(odsBuilder, odsBuilder.getLoc(), resultTypes, operands, attributes);
}
+__OP_VERIFIER__
__NAMESPACE_CLOSE__
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 53209a4..9fcb02e 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -3175,6 +3175,45 @@ applyUnrollHeuristic(omp::UnrollHeuristicOp op, llvm::IRBuilderBase &builder,
return success();
}
+/// Apply a `#pragma omp tile` / `!$omp tile` transformation using the
+/// OpenMPIRBuilder.
+static LogicalResult applyTile(omp::TileOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ llvm::OpenMPIRBuilder *ompBuilder = moduleTranslation.getOpenMPBuilder();
+ llvm::OpenMPIRBuilder::LocationDescription loc(builder);
+
+ SmallVector<llvm::CanonicalLoopInfo *> translatedLoops;
+ SmallVector<llvm::Value *> translatedSizes;
+
+ for (Value size : op.getSizes()) {
+ llvm::Value *translatedSize = moduleTranslation.lookupValue(size);
+ assert(translatedSize &&
+ "sizes clause arguments must already be translated");
+ translatedSizes.push_back(translatedSize);
+ }
+
+ for (Value applyee : op.getApplyees()) {
+ llvm::CanonicalLoopInfo *consBuilderCLI =
+ moduleTranslation.lookupOMPLoop(applyee);
+ assert(consBuilderCLI && "canonical loop must already be translated");
+ translatedLoops.push_back(consBuilderCLI);
+ }
+
+ auto generatedLoops =
+ ompBuilder->tileLoops(loc.DL, translatedLoops, translatedSizes);
+ if (!op.getGeneratees().empty()) {
+ for (auto [mlirLoop, genLoop] :
+ zip_equal(op.getGeneratees(), generatedLoops))
+ moduleTranslation.mapOmpLoop(mlirLoop, genLoop);
+ }
+
+ // CLIs can only be consumed once
+ for (Value applyee : op.getApplyees())
+ moduleTranslation.invalidateOmpLoop(applyee);
+
+ return success();
+}
+
/// Convert an Atomic Ordering attribute to llvm::AtomicOrdering.
static llvm::AtomicOrdering
convertAtomicOrdering(std::optional<omp::ClauseMemoryOrderKind> ao) {
@@ -6227,6 +6266,9 @@ convertHostOrTargetOperation(Operation *op, llvm::IRBuilderBase &builder,
// the omp.canonical_loop.
return applyUnrollHeuristic(op, builder, moduleTranslation);
})
+ .Case([&](omp::TileOp op) {
+ return applyTile(op, builder, moduleTranslation);
+ })
.Case([&](omp::TargetAllocMemOp) {
return convertTargetAllocMemOp(*op, builder, moduleTranslation);
})
diff --git a/mlir/python/mlir/dialects/transform/structured.py b/mlir/python/mlir/dialects/transform/structured.py
index bf40cc5..e3bacb5 100644
--- a/mlir/python/mlir/dialects/transform/structured.py
+++ b/mlir/python/mlir/dialects/transform/structured.py
@@ -44,18 +44,12 @@ class BufferizeToAllocationOp(BufferizeToAllocationOp):
loc=None,
ip=None,
):
- # No other types are allowed, so hard-code those here.
- allocated_buffer_type = transform.AnyValueType.get()
- new_ops_type = transform.AnyOpType.get()
-
if isinstance(memory_space, int):
memory_space = str(memory_space)
if isinstance(memory_space, str):
memory_space = Attribute.parse(memory_space)
super().__init__(
- allocated_buffer_type,
- new_ops_type,
target,
memory_space=memory_space,
memcpy_op=memcpy_op,
diff --git a/mlir/python/mlir/dialects/transform/tune.py b/mlir/python/mlir/dialects/transform/tune.py
index f63f88a..b3bfa80 100644
--- a/mlir/python/mlir/dialects/transform/tune.py
+++ b/mlir/python/mlir/dialects/transform/tune.py
@@ -6,6 +6,9 @@ from typing import Optional, Sequence
from ...ir import (
Type,
+ Value,
+ Operation,
+ OpView,
Attribute,
ArrayAttr,
StringAttr,
@@ -19,7 +22,10 @@ from .._transform_tune_extension_ops_gen import *
from .._transform_tune_extension_ops_gen import _Dialect
try:
- from .._ods_common import _cext as _ods_cext
+ from .._ods_common import (
+ get_op_result_or_value as _get_op_result_or_value,
+ _cext as _ods_cext,
+ )
except ImportError as e:
raise RuntimeError("Error loading imports from extension module") from e
@@ -36,7 +42,7 @@ class KnobOp(KnobOp):
ArrayAttr, Sequence[Union[Attribute, bool, int, float, str]], Attribute
],
*,
- selected: Optional[Attribute] = None,
+ selected: Optional[Union[Attribute, bool, int, float, str]] = None,
loc=None,
ip=None,
):
@@ -75,8 +81,62 @@ def knob(
ArrayAttr, Sequence[Union[Attribute, bool, int, float, str]], Attribute
],
*,
- selected: Optional[Attribute] = None,
+ selected: Optional[Union[Attribute, bool, int, float, str]] = None,
loc=None,
ip=None,
):
return KnobOp(result, name, options, selected=selected, loc=loc, ip=ip)
+
+
+@_ods_cext.register_operation(_Dialect, replace=True)
+class AlternativesOp(AlternativesOp):
+ def __init__(
+ self,
+ results: Sequence[Type],
+ name: Union[StringAttr, str],
+ num_alternatives: int,
+ *,
+ selected_region: Optional[
+ Union[int, IntegerAttr, Value, Operation, OpView]
+ ] = None,
+ loc=None,
+ ip=None,
+ ):
+ if isinstance(name, str):
+ name = StringAttr.get(name)
+
+ selected_region_attr = selected_region_param = None
+ if isinstance(selected_region, IntegerAttr):
+ selected_region_attr = selected_region
+ elif isinstance(selected_region, int):
+ selected_region_attr = IntegerAttr.get(
+ IntegerType.get_signless(32), selected_region
+ )
+ elif isinstance(selected_region, (Value, Operation, OpView)):
+ selected_region_param = _get_op_result_or_value(selected_region)
+
+ super().__init__(
+ results,
+ name,
+ num_alternatives,
+ selected_region_attr=selected_region_attr,
+ selected_region_param=selected_region_param,
+ loc=loc,
+ ip=ip,
+ )
+ for region in self.regions:
+ region.blocks.append()
+
+
+def alternatives(
+ results: Sequence[Type],
+ name: Union[StringAttr, str],
+ num_alternatives: int,
+ *,
+ selected_region: Optional[Union[int, IntegerAttr, Value, Operation, OpView]] = None,
+ loc=None,
+ ip=None,
+):
+ return AlternativesOp(
+ results, name, num_alternatives, selected_region=selected_region, loc=loc, ip=ip
+ )
diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
index 45b1a1f..0cbe064 100644
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -195,6 +195,36 @@ func.func @assume_alignment(%0 : memref<4x4xf16>) {
// -----
+// ALL-LABEL: func @distinct_objects
+// ALL-SAME: (%[[ARG0:.*]]: memref<?xf16>, %[[ARG1:.*]]: memref<?xf32>, %[[ARG2:.*]]: memref<?xf64>)
+func.func @distinct_objects(%arg0: memref<?xf16>, %arg1: memref<?xf32>, %arg2: memref<?xf64>) -> (memref<?xf16>, memref<?xf32>, memref<?xf64>) {
+// ALL-DAG: %[[CAST_0:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<?xf16> to !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
+// ALL-DAG: %[[CAST_1:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<?xf32> to !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
+// ALL-DAG: %[[CAST_2:.*]] = builtin.unrealized_conversion_cast %[[ARG2]] : memref<?xf64> to !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
+// ALL: %[[PTR_0:.*]] = llvm.extractvalue %[[CAST_0]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
+// ALL: %[[PTR_1:.*]] = llvm.extractvalue %[[CAST_1]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
+// ALL: %[[PTR_2:.*]] = llvm.extractvalue %[[CAST_2]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
+// ALL: %[[TRUE:.*]] = llvm.mlir.constant(true) : i1
+// ALL: llvm.intr.assume %[[TRUE]] ["separate_storage"(%[[PTR_0]], %[[PTR_1]] : !llvm.ptr, !llvm.ptr)] : i1
+// ALL: llvm.intr.assume %[[TRUE]] ["separate_storage"(%[[PTR_0]], %[[PTR_2]] : !llvm.ptr, !llvm.ptr)] : i1
+// ALL: llvm.intr.assume %[[TRUE]] ["separate_storage"(%[[PTR_1]], %[[PTR_2]] : !llvm.ptr, !llvm.ptr)] : i1
+ %1, %2, %3 = memref.distinct_objects %arg0, %arg1, %arg2 : memref<?xf16>, memref<?xf32>, memref<?xf64>
+ return %1, %2, %3 : memref<?xf16>, memref<?xf32>, memref<?xf64>
+}
+
+// -----
+
+// ALL-LABEL: func @distinct_objects_noop
+// ALL-SAME: (%[[ARG0:.*]]: memref<?xf16>)
+func.func @distinct_objects_noop(%arg0: memref<?xf16>) -> memref<?xf16> {
+// The 1-operand version is a no-op.
+// ALL-NEXT: return %[[ARG0]]
+ %1 = memref.distinct_objects %arg0 : memref<?xf16>
+ return %1 : memref<?xf16>
+}
+
+// -----
+
// CHECK-LABEL: func @assume_alignment_w_offset
// CHECK-INTERFACE-LABEL: func @assume_alignment_w_offset
func.func @assume_alignment_w_offset(%0 : memref<4x4xf16, strided<[?, ?], offset: ?>>) {
diff --git a/mlir/test/Dialect/Arith/canonicalize.mlir b/mlir/test/Dialect/Arith/canonicalize.mlir
index ca3de3a..2fe0995 100644
--- a/mlir/test/Dialect/Arith/canonicalize.mlir
+++ b/mlir/test/Dialect/Arith/canonicalize.mlir
@@ -2216,6 +2216,18 @@ func.func @test_mulf1(%arg0 : f32, %arg1 : f32) -> (f32) {
return %2 : f32
}
+// CHECK-LABEL: @test_mulf2(
+func.func @test_mulf2(%arg0 : f32) -> (f32, f32) {
+ // CHECK-DAG: %[[C0:.+]] = arith.constant 0.000000e+00 : f32
+ // CHECK-DAG: %[[C0n:.+]] = arith.constant -0.000000e+00 : f32
+ // CHECK-NEXT: return %[[C0]], %[[C0n]]
+ %c0 = arith.constant 0.0 : f32
+ %c0n = arith.constant -0.0 : f32
+ %0 = arith.mulf %c0, %arg0 fastmath<nnan,nsz> : f32
+ %1 = arith.mulf %c0n, %arg0 fastmath<nnan,nsz> : f32
+ return %0, %1 : f32, f32
+}
+
// -----
// CHECK-LABEL: @test_divf(
diff --git a/mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir b/mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir
index 99790cc..fcd004a 100644
--- a/mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir
+++ b/mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir
@@ -85,3 +85,14 @@ func.func @no_expansion(%x: f32) -> f32 {
%y = arith.addf %x, %c : f32
func.return %y : f32
}
+
+// -----
+
+func.func @no_promote_select(%c: i1, %x: bf16, %y: bf16) -> bf16 {
+// CHECK-LABEL: @no_promote_select
+// CHECK-SAME: (%[[C:.+]]: i1, %[[X:.+]]: bf16, %[[Y:.+]]: bf16)
+// CHECK: %[[Z:.+]] = arith.select %[[C]], %[[X]], %[[Y]] : bf16
+// CHECK: return %[[Z]]
+ %z = arith.select %c, %x, %y : bf16
+ func.return %z : bf16
+}
diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index 0bad151..6134695 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -1068,6 +1068,38 @@ llvm.func @rocdl.cvt.scale.pk8(%i32: i32, %v2xi32: vector<2xi32>, %scale: i32) {
// -----
+// CHECK-LABEL: rocdl.cvt.scalef32.pk8
+llvm.func @rocdl.cvt.scalef32.pk8(%v8xf32: vector<8xf32>,
+ %v8xf16: vector<8xf16>,
+ %v8xbf16: vector<8xbf16>,
+ %scale: f32) {
+
+ // CHECK: rocdl.cvt.scalef32.pk8.fp8.f32
+ %0 = rocdl.cvt.scalef32.pk8.fp8.f32 %v8xf32, %scale : vector<2xi32>
+ // CHECK: rocdl.cvt.scalef32.pk8.bf8.f32
+ %1 = rocdl.cvt.scalef32.pk8.bf8.f32 %v8xf32, %scale : vector<2xi32>
+ // CHECK: rocdl.cvt.scalef32.pk8.fp4.f32
+ %2 = rocdl.cvt.scalef32.pk8.fp4.f32 %v8xf32, %scale : i32
+
+ // CHECK: rocdl.cvt.scalef32.pk8.fp8.f16
+ %3 = rocdl.cvt.scalef32.pk8.fp8.f16 %v8xf16, %scale : vector<2xi32>
+ // CHECK: rocdl.cvt.scalef32.pk8.bf8.f16
+ %4 = rocdl.cvt.scalef32.pk8.bf8.f16 %v8xf16, %scale : vector<2xi32>
+ // CHECK: rocdl.cvt.scalef32.pk8.fp4.f16
+ %5 = rocdl.cvt.scalef32.pk8.fp4.f16 %v8xf16, %scale : i32
+
+ // CHECK: rocdl.cvt.scalef32.pk8.fp8.bf16
+ %6 = rocdl.cvt.scalef32.pk8.fp8.bf16 %v8xbf16, %scale : vector<2xi32>
+ // CHECK: rocdl.cvt.scalef32.pk8.bf8.bf16
+ %7 = rocdl.cvt.scalef32.pk8.bf8.bf16 %v8xbf16, %scale : vector<2xi32>
+ // CHECK: rocdl.cvt.scalef32.pk8.fp4.bf16
+ %8 = rocdl.cvt.scalef32.pk8.fp4.bf16 %v8xbf16, %scale : i32
+
+ llvm.return
+}
+
+// -----
+
// CHECK-LABEL: rocdl.cvt.scale.pk16
llvm.func @rocdl.cvt.scale.pk16(%v3xi32: vector<3xi32>, %scale:i32) {
diff --git a/mlir/test/Dialect/Math/sincos-fusion.mlir b/mlir/test/Dialect/Math/sincos-fusion.mlir
new file mode 100644
index 0000000..29fb9f1
--- /dev/null
+++ b/mlir/test/Dialect/Math/sincos-fusion.mlir
@@ -0,0 +1,86 @@
+// RUN: mlir-opt -math-sincos-fusion %s | FileCheck %s
+
+// CHECK-LABEL: func.func @sincos_fusion(
+// CHECK-SAME: %[[ARG0:.*]]: f32,
+// CHECK-SAME: %[[ARG1:.*]]: f32) -> (f32, f32, f32, f32) {
+// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] : f32
+// CHECK: %[[VAL_2:.*]], %[[VAL_3:.*]] = math.sincos %[[ARG1]] : f32
+// CHECK: return %[[VAL_0]], %[[VAL_1]], %[[VAL_3]], %[[VAL_2]] : f32, f32, f32, f32
+// CHECK: }
+func.func @sincos_fusion(%arg0 : f32, %arg1 : f32) -> (f32, f32, f32, f32) {
+ %0 = math.sin %arg0 : f32
+ %1 = math.cos %arg0 : f32
+
+ %2 = math.cos %arg1 : f32
+ %3 = math.sin %arg1 : f32
+
+ func.return %0, %1, %2, %3 : f32, f32, f32, f32
+}
+
+func.func private @sink(%arg0 : f32)
+
+// CHECK: func.func private @sink(f32)
+// CHECK-LABEL: func.func @sincos_ensure_ssa_dominance(
+// CHECK-SAME: %[[ARG0:.*]]: f32,
+// CHECK-SAME: %[[ARG1:.*]]: f32) -> (f32, f32, f32, f32) {
+// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] : f32
+// CHECK: call @sink(%[[VAL_0]]) : (f32) -> ()
+// CHECK: %[[VAL_2:.*]], %[[VAL_3:.*]] = math.sincos %[[ARG1]] : f32
+// CHECK: call @sink(%[[VAL_3]]) : (f32) -> ()
+// CHECK: return %[[VAL_0]], %[[VAL_1]], %[[VAL_3]], %[[VAL_2]] : f32, f32, f32, f32
+// CHECK: }
+func.func @sincos_ensure_ssa_dominance(%arg0 : f32, %arg1 : f32) -> (f32, f32, f32, f32) {
+ %0 = math.sin %arg0 : f32
+ func.call @sink(%0) : (f32) -> ()
+ %1 = math.cos %arg0 : f32
+ %2 = math.cos %arg1 : f32
+ func.call @sink(%2) : (f32) -> ()
+ %3 = math.sin %arg1 : f32
+ func.return %0, %1, %2, %3 : f32, f32, f32, f32
+}
+
+// CHECK-LABEL: func.func @sincos_fusion_no_match_fmf(
+// CHECK-SAME: %[[ARG0:.*]]: f32) -> (f32, f32) {
+// CHECK: %[[VAL_0:.*]] = math.sin %[[ARG0]] fastmath<contract> : f32
+// CHECK: %[[VAL_1:.*]] = math.cos %[[ARG0]] : f32
+// CHECK: return %[[VAL_0]], %[[VAL_1]] : f32, f32
+// CHECK: }
+func.func @sincos_fusion_no_match_fmf(%arg0 : f32) -> (f32, f32) {
+ %0 = math.sin %arg0 fastmath<contract> : f32
+ %1 = math.cos %arg0 : f32
+ func.return %0, %1 : f32, f32
+}
+
+// CHECK-LABEL: func.func @sincos_no_fusion_different_block(
+// CHECK-SAME: %[[ARG0:.*]]: f32,
+// CHECK-SAME: %[[ARG1:.*]]: i1) -> f32 {
+// CHECK: %[[VAL_0:.*]] = scf.if %[[ARG1]] -> (f32) {
+// CHECK: %[[VAL_1:.*]] = math.sin %[[ARG0]] : f32
+// CHECK: scf.yield %[[VAL_1]] : f32
+// CHECK: } else {
+// CHECK: %[[VAL_2:.*]] = math.cos %[[ARG0]] : f32
+// CHECK: scf.yield %[[VAL_2]] : f32
+// CHECK: }
+// CHECK: return %[[VAL_0]] : f32
+// CHECK: }
+func.func @sincos_no_fusion_different_block(%arg0 : f32, %flag : i1) -> f32 {
+ %0 = scf.if %flag -> f32 {
+ %s = math.sin %arg0 : f32
+ scf.yield %s : f32
+ } else {
+ %c = math.cos %arg0 : f32
+ scf.yield %c : f32
+ }
+ func.return %0 : f32
+}
+
+// CHECK-LABEL: func.func @sincos_fusion_preserve_fastmath(
+// CHECK-SAME: %[[ARG0:.*]]: f32) -> (f32, f32) {
+// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] fastmath<contract> : f32
+// CHECK: return %[[VAL_0]], %[[VAL_1]] : f32, f32
+// CHECK: }
+func.func @sincos_fusion_preserve_fastmath(%arg0 : f32) -> (f32, f32) {
+ %0 = math.sin %arg0 fastmath<contract> : f32
+ %1 = math.cos %arg0 fastmath<contract> : f32
+ func.return %0, %1 : f32, f32
+}
diff --git a/mlir/test/Dialect/MemRef/invalid.mlir b/mlir/test/Dialect/MemRef/invalid.mlir
index 3f96d90..5ff2920 100644
--- a/mlir/test/Dialect/MemRef/invalid.mlir
+++ b/mlir/test/Dialect/MemRef/invalid.mlir
@@ -1169,3 +1169,19 @@ func.func @expand_shape_invalid_output_shape(
into memref<2x15x20xf32, strided<[60000, 4000, 2], offset: 100>>
return
}
+
+// -----
+
+func.func @distinct_objects_types_mismatch(%arg0: memref<?xf32>, %arg1: memref<?xi32>) -> (memref<?xi32>, memref<?xf32>) {
+ // expected-error @+1 {{operand types and result types must match}}
+ %0, %1 = "memref.distinct_objects"(%arg0, %arg1) : (memref<?xf32>, memref<?xi32>) -> (memref<?xi32>, memref<?xf32>)
+ return %0, %1 : memref<?xi32>, memref<?xf32>
+}
+
+// -----
+
+func.func @distinct_objects_0_operands() {
+ // expected-error @+1 {{expected at least one operand}}
+ "memref.distinct_objects"() : () -> ()
+ return
+}
diff --git a/mlir/test/Dialect/MemRef/ops.mlir b/mlir/test/Dialect/MemRef/ops.mlir
index 6c2298a..a90c950 100644
--- a/mlir/test/Dialect/MemRef/ops.mlir
+++ b/mlir/test/Dialect/MemRef/ops.mlir
@@ -302,6 +302,15 @@ func.func @assume_alignment(%0: memref<4x4xf16>) {
return
}
+// CHECK-LABEL: func @distinct_objects
+// CHECK-SAME: (%[[ARG0:.*]]: memref<?xf16>, %[[ARG1:.*]]: memref<?xf32>, %[[ARG2:.*]]: memref<?xf64>)
+func.func @distinct_objects(%arg0: memref<?xf16>, %arg1: memref<?xf32>, %arg2: memref<?xf64>) -> (memref<?xf16>, memref<?xf32>, memref<?xf64>) {
+ // CHECK: %[[RES:.*]]:3 = memref.distinct_objects %[[ARG0]], %[[ARG1]], %[[ARG2]] : memref<?xf16>, memref<?xf32>, memref<?xf64>
+ %1, %2, %3 = memref.distinct_objects %arg0, %arg1, %arg2 : memref<?xf16>, memref<?xf32>, memref<?xf64>
+ // CHECK: return %[[RES]]#0, %[[RES]]#1, %[[RES]]#2 : memref<?xf16>, memref<?xf32>, memref<?xf64>
+ return %1, %2, %3 : memref<?xf16>, memref<?xf32>, memref<?xf64>
+}
+
// CHECK-LABEL: func @expand_collapse_shape_static
func.func @expand_collapse_shape_static(
%arg0: memref<3x4x5xf32>,
diff --git a/mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir b/mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir
index adadb8b..0e9385e 100644
--- a/mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir
+++ b/mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s | FileCheck %s
-// RUN: mlir-opt %s | mlir-opt | FileCheck %s
+// RUN: mlir-opt %s | FileCheck %s --enable-var-scope
+// RUN: mlir-opt %s | mlir-opt | FileCheck %s --enable-var-scope
// CHECK-LABEL: @omp_canonloop_raw(
@@ -24,10 +24,10 @@ func.func @omp_canonloop_raw(%tc : i32) -> () {
func.func @omp_canonloop_sequential_raw(%tc : i32) -> () {
// CHECK-NEXT: %canonloop_s0 = omp.new_cli
%canonloop_s0 = "omp.new_cli" () : () -> (!omp.cli)
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%[[tc]]) {
"omp.canonical_loop" (%tc, %canonloop_s0) ({
^bb_first(%iv_first: i32):
- // CHECK-NEXT: = llvm.add %iv, %iv : i32
+ // CHECK-NEXT: = llvm.add %iv_s0, %iv_s0 : i32
%newval = llvm.add %iv_first, %iv_first : i32
// CHECK-NEXT: omp.terminator
omp.terminator
@@ -36,7 +36,7 @@ func.func @omp_canonloop_sequential_raw(%tc : i32) -> () {
// CHECK-NEXT: %canonloop_s1 = omp.new_cli
%canonloop_s1 = "omp.new_cli" () : () -> (!omp.cli)
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv : i32 in range(%[[tc]]) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%[[tc]]) {
"omp.canonical_loop" (%tc, %canonloop_s1) ({
^bb_second(%iv_second: i32):
// CHECK: omp.terminator
@@ -52,17 +52,17 @@ func.func @omp_canonloop_sequential_raw(%tc : i32) -> () {
// CHECK-LABEL: @omp_nested_canonloop_raw(
// CHECK-SAME: %[[tc_outer:.+]]: i32, %[[tc_inner:.+]]: i32)
func.func @omp_nested_canonloop_raw(%tc_outer : i32, %tc_inner : i32) -> () {
- // CHECK-NEXT: %canonloop_s0 = omp.new_cli
+ // CHECK-NEXT: %canonloop = omp.new_cli
%outer = "omp.new_cli" () : () -> (!omp.cli)
- // CHECK-NEXT: %canonloop_s0_s0 = omp.new_cli
+ // CHECK-NEXT: %canonloop_d1 = omp.new_cli
%inner = "omp.new_cli" () : () -> (!omp.cli)
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc_outer]]) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc_outer]]) {
"omp.canonical_loop" (%tc_outer, %outer) ({
^bb_outer(%iv_outer: i32):
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0_s0) %iv_0 : i32 in range(%[[tc_inner]]) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc_inner]]) {
"omp.canonical_loop" (%tc_inner, %inner) ({
^bb_inner(%iv_inner: i32):
- // CHECK-NEXT: = llvm.add %iv, %iv_0 : i32
+ // CHECK-NEXT: = llvm.add %iv, %iv_d1 : i32
%newval = llvm.add %iv_outer, %iv_inner: i32
// CHECK-NEXT: omp.terminator
omp.terminator
@@ -108,16 +108,24 @@ func.func @omp_canonloop_constant_pretty() -> () {
func.func @omp_canonloop_sequential_pretty(%tc : i32) -> () {
// CHECK-NEXT: %canonloop_s0 = omp.new_cli
%canonloop_s0 = omp.new_cli
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) {
- omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%tc) {
// CHECK-NEXT: omp.terminator
omp.terminator
}
// CHECK: %canonloop_s1 = omp.new_cli
%canonloop_s1 = omp.new_cli
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv : i32 in range(%[[tc]]) {
- omp.canonical_loop(%canonloop_s1) %iv_0 : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%tc) {
+ // CHECK-NEXT: omp.terminator
+ omp.terminator
+ }
+
+ // CHECK: %canonloop_s2 = omp.new_cli
+ %canonloop_s2 = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s2) %iv_s2 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_s2) %iv_s2 : i32 in range(%tc) {
// CHECK-NEXT: omp.terminator
omp.terminator
}
@@ -126,17 +134,17 @@ func.func @omp_canonloop_sequential_pretty(%tc : i32) -> () {
}
-// CHECK-LABEL: @omp_canonloop_nested_pretty(
+// CHECK-LABEL: @omp_canonloop_2d_nested_pretty(
// CHECK-SAME: %[[tc:.+]]: i32)
-func.func @omp_canonloop_nested_pretty(%tc : i32) -> () {
- // CHECK-NEXT: %canonloop_s0 = omp.new_cli
- %canonloop_s0 = omp.new_cli
- // CHECK-NEXT: %canonloop_s0_s0 = omp.new_cli
- %canonloop_s0_s0 = omp.new_cli
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) {
- omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%tc) {
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0_s0) %iv_0 : i32 in range(%[[tc]]) {
- omp.canonical_loop(%canonloop_s0_s0) %iv_0 : i32 in range(%tc) {
+func.func @omp_canonloop_2d_nested_pretty(%tc : i32) -> () {
+ // CHECK-NEXT: %canonloop = omp.new_cli
+ %canonloop = omp.new_cli
+ // CHECK-NEXT: %canonloop_d1 = omp.new_cli
+ %canonloop_d1 = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%tc) {
// CHECK: omp.terminator
omp.terminator
}
@@ -147,6 +155,77 @@ func.func @omp_canonloop_nested_pretty(%tc : i32) -> () {
}
+// CHECK-LABEL: @omp_canonloop_3d_nested_pretty(
+// CHECK-SAME: %[[tc:.+]]: i32)
+func.func @omp_canonloop_3d_nested_pretty(%tc : i32) -> () {
+ // CHECK: %canonloop = omp.new_cli
+ %canonloop = omp.new_cli
+ // CHECK: %canonloop_d1 = omp.new_cli
+ %canonloop_d1 = omp.new_cli
+ // CHECK: %canonloop_d2 = omp.new_cli
+ %canonloop_d2 = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_d1) %iv_1d : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_d2) %iv_d2 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_d2) %iv_d2 : i32 in range(%tc) {
+ // CHECK-NEXT: omp.terminator
+ omp.terminator
+ // CHECK-NEXT: }
+ }
+ // CHECK-NEXT: omp.terminator
+ omp.terminator
+ // CHECK-NEXT: }
+ }
+ // CHECK-NEXT: omp.terminator
+ omp.terminator
+ }
+
+ return
+}
+
+
+// CHECK-LABEL: @omp_canonloop_sequential_nested_pretty(
+// CHECK-SAME: %[[tc:.+]]: i32)
+func.func @omp_canonloop_sequential_nested_pretty(%tc : i32) -> () {
+ // CHECK-NEXT: %canonloop_s0 = omp.new_cli
+ %canonloop_s0 = omp.new_cli
+ // CHECK-NEXT: %canonloop_s0_d1 = omp.new_cli
+ %canonloop_s0_d1 = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s0_d1) %iv_s0_d1 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_s0_d1) %iv_s0_d1 : i32 in range(%tc) {
+ // CHECK-NEXT: omp.terminator
+ omp.terminator
+ // CHECK-NEXT: }
+ }
+ // CHECK-NEXT: omp.terminator
+ omp.terminator
+ // CHECK-NEXT: }
+ }
+
+ // CHECK-NEXT: %canonloop_s1 = omp.new_cli
+ %canonloop_s1 = omp.new_cli
+ // CHECK-NEXT: %canonloop_s1_d1 = omp.new_cli
+ %canonloop_s1_d1 = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_s1_d1) %iv_s1_d1 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop_s1_d1) %iv_s1d1 : i32 in range(%tc) {
+ // CHECK-NEXT: omp.terminator
+ omp.terminator
+ // CHECK-NEXT: }
+ }
+ // CHECK-NEXT: omp.terminator
+ omp.terminator
+ }
+
+ return
+}
+
+
// CHECK-LABEL: @omp_newcli_unused(
// CHECK-SAME: )
func.func @omp_newcli_unused() -> () {
@@ -155,3 +234,74 @@ func.func @omp_newcli_unused() -> () {
// CHECK-NEXT: return
return
}
+
+
+// CHECK-LABEL: @omp_canonloop_multiregion_isolatedfromabove(
+func.func @omp_canonloop_multiregion_isolatedfromabove() -> () {
+ omp.private {type = firstprivate} @x.privatizer : !llvm.ptr init {
+ ^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
+ %c42_i32 = arith.constant 42: i32
+ // CHECK: omp.canonical_loop %iv : i32 in range(%c42_i32) {
+ omp.canonical_loop %iv1 : i32 in range(%c42_i32) {
+ omp.terminator
+ }
+ // CHECK: omp.yield
+ omp.yield(%arg0 : !llvm.ptr)
+ } copy {
+ ^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
+ %c42_i32 = arith.constant 42: i32
+ // CHECK: omp.canonical_loop %iv : i32 in range(%c42_i32) {
+ omp.canonical_loop %iv : i32 in range(%c42_i32) {
+ // CHECK: omp.canonical_loop %iv_d1 : i32 in range(%c42_i32) {
+ omp.canonical_loop %iv_d1 : i32 in range(%c42_i32) {
+ omp.terminator
+ }
+ omp.terminator
+ }
+ // CHECK: omp.yield
+ omp.yield(%arg0 : !llvm.ptr)
+ } dealloc {
+ ^bb0(%arg0: !llvm.ptr):
+ %c42_i32 = arith.constant 42: i32
+ // CHECK: omp.canonical_loop %iv_s0 : i32 in range(%c42_i32) {
+ omp.canonical_loop %iv_s0 : i32 in range(%c42_i32) {
+ omp.terminator
+ }
+ // CHECK: omp.canonical_loop %iv_s1 : i32 in range(%c42_i32) {
+ omp.canonical_loop %iv_s1 : i32 in range(%c42_i32) {
+ omp.terminator
+ }
+ // CHECK: omp.yield
+ omp.yield
+ }
+
+ // CHECK: return
+ return
+}
+
+
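+// The pretty printer names CLIs and induction variables by structural
+// position: _d<n> marks nesting depth, _s<n> the n-th loop in a sequence, and
+// _r<n> the n-th region of the parent op, as the multi-region test below
+// exercises.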
+// CHECK-LABEL: @omp_canonloop_multiregion(
+func.func @omp_canonloop_multiregion(%c : i1) -> () {
+ %c42_i32 = arith.constant 42: i32
+ %canonloop1 = omp.new_cli
+ %canonloop2 = omp.new_cli
+ %canonloop3 = omp.new_cli
+ scf.if %c {
+ // CHECK: omp.canonical_loop(%canonloop_r0) %iv_r0 : i32 in range(%c42_i32) {
+ omp.canonical_loop(%canonloop1) %iv1 : i32 in range(%c42_i32) {
+ omp.terminator
+ }
+ } else {
+ // CHECK: omp.canonical_loop(%canonloop_r1_s0) %iv_r1_s0 : i32 in range(%c42_i32) {
+ omp.canonical_loop(%canonloop2) %iv2 : i32 in range(%c42_i32) {
+ omp.terminator
+ }
+ // CHECK: omp.canonical_loop(%canonloop_r1_s1) %iv_r1_s1 : i32 in range(%c42_i32) {
+ omp.canonical_loop(%canonloop3) %iv3 : i32 in range(%c42_i32) {
+ omp.terminator
+ }
+ }
+
+ // CHECK: return
+ return
+}
diff --git a/mlir/test/Dialect/OpenMP/cli-tile.mlir b/mlir/test/Dialect/OpenMP/cli-tile.mlir
new file mode 100644
index 0000000..73d5478
--- /dev/null
+++ b/mlir/test/Dialect/OpenMP/cli-tile.mlir
@@ -0,0 +1,138 @@
+// RUN: mlir-opt %s | FileCheck %s --enable-var-scope
+// RUN: mlir-opt %s | mlir-opt | FileCheck %s --enable-var-scope
+
+
+// Raw syntax check (MLIR output is always pretty-printed)
+// CHECK-LABEL: @omp_tile_raw(
+// CHECK-SAME: %[[tc:.+]]: i32, %[[ts:.+]]: i32) {
+func.func @omp_tile_raw(%tc : i32, %ts : i32) -> () {
+ // CHECK-NEXT: %canonloop = omp.new_cli
+ %canonloop = "omp.new_cli" () : () -> (!omp.cli)
+ // CHECK-NEXT: %grid1 = omp.new_cli
+ %grid = "omp.new_cli" () : () -> (!omp.cli)
+ // CHECK-NEXT: %intratile1 = omp.new_cli
+ %intratile = "omp.new_cli" () : () -> (!omp.cli)
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
+ "omp.canonical_loop" (%tc, %canonloop) ({
+ ^bb0(%iv: i32):
+ // CHECK: omp.terminator
+ omp.terminator
+ }) : (i32, !omp.cli) -> ()
+ // CHECK: omp.tile (%grid1, %intratile1) <- (%canonloop) sizes(%[[ts]] : i32)
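+ // operandSegmentSizes groups the raw operands as 2 generatees (%grid,
+ // %intratile), 1 applyee (%canonloop), and 1 tile size (%ts).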
+ "omp.tile"(%grid, %intratile, %canonloop, %ts) <{operandSegmentSizes = array<i32: 2, 1, 1>}> : (!omp.cli, !omp.cli, !omp.cli, i32) -> ()
+ //"omp.tile" (%canonloop) : (!omp.cli) -> ()
+ return
+}
+
+
+// Pretty syntax check
+// CHECK-LABEL: @omp_tile_pretty(
+// CHECK-SAME: %[[tc:.+]]: i32, %[[ts:.+]]: i32) {
+func.func @omp_tile_pretty(%tc : i32, %ts : i32) -> () {
+ // CHECK-NEXT: %canonloop = omp.new_cli
+ %canonloop = omp.new_cli
+ // CHECK-NEXT: %grid1 = omp.new_cli
+ %grid = omp.new_cli
+ // CHECK-NEXT: %intratile1 = omp.new_cli
+ %intratile = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ // CHECK: omp.tile (%grid1, %intratile1) <- (%canonloop) sizes(%[[ts]] : i32)
+ omp.tile(%grid, %intratile) <- (%canonloop) sizes(%ts : i32)
+ return
+}
+
+
+// Specifying the generatees for omp.tile is optional
+// CHECK-LABEL: @omp_tile_optionalgen_pretty(
+// CHECK-SAME: %[[tc:.+]]: i32, %[[ts:.+]]: i32) {
+func.func @omp_tile_optionalgen_pretty(%tc : i32, %ts : i32) -> () {
+ // CHECK-NEXT: %canonloop = omp.new_cli
+ %canonloop = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ // CHECK: omp.tile <- (%canonloop) sizes(%[[ts]] : i32)
+ omp.tile <- (%canonloop) sizes(%ts : i32)
+ return
+}
+
+
+// Two-dimensional tiling
+// CHECK-LABEL: @omp_tile_2d_pretty(
+// CHECK-SAME: %[[tc1:.+]]: i32, %[[tc2:.+]]: i32, %[[ts1:.+]]: i32, %[[ts2:.+]]: i32) {
+func.func @omp_tile_2d_pretty(%tc1 : i32, %tc2 : i32, %ts1 : i32, %ts2 : i32) -> () {
+ // CHECK-NEXT: %canonloop = omp.new_cli
+ %cli_outer = omp.new_cli
+ // CHECK-NEXT: %canonloop_d1 = omp.new_cli
+ %cli_inner = omp.new_cli
+ // CHECK-NEXT: %grid1 = omp.new_cli
+ %grid1 = omp.new_cli
+ // CHECK-NEXT: %grid2 = omp.new_cli
+ %grid2 = omp.new_cli
+ // CHECK-NEXT: %intratile1 = omp.new_cli
+ %intratile1 = omp.new_cli
+ // CHECK-NEXT: %intratile2 = omp.new_cli
+ %intratile2 = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc1]]) {
+ omp.canonical_loop(%cli_outer) %iv_outer : i32 in range(%tc1) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc2]]) {
+ omp.canonical_loop(%cli_inner) %iv_inner : i32 in range(%tc2) {
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ // CHECK: omp.tile (%grid1, %grid2, %intratile1, %intratile2) <- (%canonloop, %canonloop_d1) sizes(%[[ts1]], %[[ts2]] : i32, i32)
+ omp.tile (%grid1, %grid2, %intratile1, %intratile2) <- (%cli_outer, %cli_inner) sizes(%ts1, %ts2 : i32, i32)
+ return
+}
+
+
+// Three-dimensional tiling
+// CHECK-LABEL: @omp_tile_3d_pretty(
+// CHECK-SAME: %[[tc:.+]]: i32, %[[ts:.+]]: i32) {
+func.func @omp_tile_3d_pretty(%tc : i32, %ts : i32) -> () {
+ // CHECK-NEXT: %canonloop = omp.new_cli
+ %cli_outer = omp.new_cli
+ // CHECK-NEXT: %canonloop_d1 = omp.new_cli
+ %cli_middle = omp.new_cli
+ // CHECK-NEXT: %canonloop_d2 = omp.new_cli
+ %cli_inner = omp.new_cli
+ // CHECK-NEXT: %grid1 = omp.new_cli
+ %grid1 = omp.new_cli
+ // CHECK-NEXT: %grid2 = omp.new_cli
+ %grid2 = omp.new_cli
+ // CHECK-NEXT: %grid3 = omp.new_cli
+ %grid3 = omp.new_cli
+ // CHECK-NEXT: %intratile1 = omp.new_cli
+ %intratile1 = omp.new_cli
+ // CHECK-NEXT: %intratile2 = omp.new_cli
+ %intratile2 = omp.new_cli
+ // CHECK-NEXT: %intratile3 = omp.new_cli
+ %intratile3 = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%cli_outer) %iv_outer : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%cli_middle) %iv_middle : i32 in range(%tc) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_d2) %iv_d2 : i32 in range(%[[tc]]) {
+ omp.canonical_loop(%cli_inner) %iv_inner : i32 in range(%tc) {
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ // CHECK: omp.terminator
+ omp.terminator
+ }
+ // CHECK: omp.tile (%grid1, %grid2, %grid3, %intratile1, %intratile2, %intratile3) <- (%canonloop, %canonloop_d1, %canonloop_d2) sizes(%[[ts]], %[[ts]], %[[ts]] : i32, i32, i32)
+ omp.tile (%grid1, %grid2, %grid3, %intratile1, %intratile2, %intratile3) <- (%cli_outer, %cli_middle, %cli_inner) sizes(%ts, %ts, %ts : i32, i32, i32)
+ return
+}
diff --git a/mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir b/mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir
index cda7d0b..16884f4 100644
--- a/mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir
+++ b/mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir
@@ -1,18 +1,18 @@
-// RUN: mlir-opt %s | FileCheck %s
-// RUN: mlir-opt %s | mlir-opt | FileCheck %s
+// RUN: mlir-opt %s | FileCheck %s --enable-var-scope
+// RUN: mlir-opt %s | mlir-opt | FileCheck %s --enable-var-scope
// CHECK-LABEL: @omp_unroll_heuristic_raw(
// CHECK-SAME: %[[tc:.+]]: i32) {
func.func @omp_unroll_heuristic_raw(%tc : i32) -> () {
- // CHECK-NEXT: %canonloop_s0 = omp.new_cli
+ // CHECK-NEXT: %canonloop = omp.new_cli
%canonloop = "omp.new_cli" () : () -> (!omp.cli)
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
"omp.canonical_loop" (%tc, %canonloop) ({
^bb0(%iv: i32):
omp.terminator
}) : (i32, !omp.cli) -> ()
- // CHECK: omp.unroll_heuristic(%canonloop_s0)
+ // CHECK: omp.unroll_heuristic(%canonloop)
"omp.unroll_heuristic" (%canonloop) : (!omp.cli) -> ()
return
}
@@ -22,12 +22,12 @@ func.func @omp_unroll_heuristic_raw(%tc : i32) -> () {
// CHECK-SAME: %[[tc:.+]]: i32) {
func.func @omp_unroll_heuristic_pretty(%tc : i32) -> () {
// CHECK-NEXT: %[[CANONLOOP:.+]] = omp.new_cli
- %canonloop = "omp.new_cli" () : () -> (!omp.cli)
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) {
+ %canonloop = omp.new_cli
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
omp.terminator
}
- // CHECK: omp.unroll_heuristic(%canonloop_s0)
+ // CHECK: omp.unroll_heuristic(%canonloop)
omp.unroll_heuristic(%canonloop)
return
}
@@ -36,13 +36,13 @@ func.func @omp_unroll_heuristic_pretty(%tc : i32) -> () {
// CHECK-LABEL: @omp_unroll_heuristic_nested_pretty(
// CHECK-SAME: %[[tc:.+]]: i32) {
func.func @omp_unroll_heuristic_nested_pretty(%tc : i32) -> () {
- // CHECK-NEXT: %canonloop_s0 = omp.new_cli
+ // CHECK-NEXT: %canonloop = omp.new_cli
%cli_outer = omp.new_cli
- // CHECK-NEXT: %canonloop_s0_s0 = omp.new_cli
+ // CHECK-NEXT: %canonloop_d1 = omp.new_cli
%cli_inner = omp.new_cli
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) {
omp.canonical_loop(%cli_outer) %iv_outer : i32 in range(%tc) {
- // CHECK-NEXT: omp.canonical_loop(%canonloop_s0_s0) %iv_0 : i32 in range(%[[tc]]) {
+ // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc]]) {
omp.canonical_loop(%cli_inner) %iv_inner : i32 in range(%tc) {
// CHECK: omp.terminator
omp.terminator
@@ -51,9 +51,9 @@ func.func @omp_unroll_heuristic_nested_pretty(%tc : i32) -> () {
omp.terminator
}
- // CHECK: omp.unroll_heuristic(%canonloop_s0)
+ // CHECK: omp.unroll_heuristic(%canonloop)
omp.unroll_heuristic(%cli_outer)
- // CHECK-NEXT: omp.unroll_heuristic(%canonloop_s0_s0)
+ // CHECK-NEXT: omp.unroll_heuristic(%canonloop_d1)
omp.unroll_heuristic(%cli_inner)
return
}
diff --git a/mlir/test/Dialect/OpenMP/invalid-tile.mlir b/mlir/test/Dialect/OpenMP/invalid-tile.mlir
new file mode 100644
index 0000000..e63a062
--- /dev/null
+++ b/mlir/test/Dialect/OpenMP/invalid-tile.mlir
@@ -0,0 +1,119 @@
+// RUN: mlir-opt -split-input-file -verify-diagnostics %s
+
+
+func.func @missing_sizes(%tc : i32, %ts : i32) {
+ %canonloop = omp.new_cli
+ omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
+ omp.terminator
+ }
+
+ // expected-error@+1 {{'omp.tile' op there must be one tile size for each applyee}}
+ omp.tile <-(%canonloop)
+
+ return
+}
+
+// -----
+
+func.func @no_loop(%tc : i32, %ts : i32) {
+ // expected-error@+1 {{'omp.tile' op must apply to at least one loop}}
+ omp.tile <-()
+
+ return
+}
+
+// -----
+
+func.func @missing_generator(%tc : i32, %ts : i32) {
+ // expected-error@+1 {{'omp.new_cli' op CLI has no generator}}
+ %canonloop = omp.new_cli
+
+ // expected-note@+1 {{see consumer here: "omp.tile"(%0, %arg1) <{operandSegmentSizes = array<i32: 0, 1, 1>}> : (!omp.cli, i32) -> ()}}
+ omp.tile <-(%canonloop) sizes(%ts : i32)
+
+ return
+}
+
+// -----
+
+func.func @insufficient_sizes(%tc : i32, %ts : i32) {
+ %canonloop1 = omp.new_cli
+ %canonloop2 = omp.new_cli
+ omp.canonical_loop(%canonloop1) %iv : i32 in range(%tc) {
+ omp.terminator
+ }
+ omp.canonical_loop(%canonloop2) %iv : i32 in range(%tc) {
+ omp.terminator
+ }
+
+ // expected-error@+1 {{'omp.tile' op there must be one tile size for each applyee}}
+ omp.tile <-(%canonloop1, %canonloop2) sizes(%ts : i32)
+
+ return
+}
+
+// -----
+
+func.func @insufficient_applyees(%tc : i32, %ts : i32) {
+ %canonloop = omp.new_cli
+ omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
+ omp.terminator
+ }
+
+ // expected-error@+1 {{'omp.tile' op there must be one tile size for each applyee}}
+ omp.tile <- (%canonloop) sizes(%ts, %ts : i32, i32)
+
+ return
+}
+
+// -----
+
+func.func @insufficient_generatees(%tc : i32, %ts : i32) {
+ %canonloop = omp.new_cli
+ %grid = omp.new_cli
+ omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) {
+ omp.terminator
+ }
+
+ // expected-error@+1 {{'omp.tile' op expecting two times the number of generatees than applyees}}
+ omp.tile (%grid) <- (%canonloop) sizes(%ts : i32)
+
+ return
+}
+
+// -----
+
+func.func @not_perfectly_nested(%tc : i32, %ts : i32) {
+ %canonloop1 = omp.new_cli
+ %canonloop2 = omp.new_cli
+ omp.canonical_loop(%canonloop1) %iv1 : i32 in range(%tc) {
+ %v = arith.constant 42 : i32
+ omp.canonical_loop(%canonloop2) %iv2 : i32 in range(%tc) {
+ omp.terminator
+ }
+ omp.terminator
+ }
+
+ // expected-error@+1 {{'omp.tile' op tiled loop nest must be perfectly nested}}
+ omp.tile <-(%canonloop1, %canonloop2) sizes(%ts, %ts : i32, i32)
+
+ return
+}
+
+// -----
+
+func.func @non_rectangular(%tc : i32, %ts : i32) {
+ %canonloop1 = omp.new_cli
+ %canonloop2 = omp.new_cli
+ omp.canonical_loop(%canonloop1) %iv1 : i32 in range(%tc) {
+ omp.canonical_loop(%canonloop2) %iv2 : i32 in range(%iv1) {
+ omp.terminator
+ }
+ omp.terminator
+ }
+
+ // expected-error@+1 {{'omp.tile' op tiled loop nest must be rectangular}}
+ omp.tile <-(%canonloop1, %canonloop2) sizes(%ts, %ts : i32, i32)
+
+ return
+}
diff --git a/mlir/test/Dialect/Transform/test-promote-tensors.mlir b/mlir/test/Dialect/Transform/test-promote-tensors.mlir
new file mode 100644
index 0000000..bc9a05a
--- /dev/null
+++ b/mlir/test/Dialect/Transform/test-promote-tensors.mlir
@@ -0,0 +1,104 @@
+// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s
+
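+// transform.structured.promote_tensor to 1 %v replaces the chosen operand with
+// a bufferization.alloc_tensor in memory space 1, sized from the operand's
+// dynamic dims; inputs are copied in via materialize_in_destination, while
+// outputs (inits) only get the allocation (see the CHECK-NOT below).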
+// CHECK-LABEL: @promote_in0
+// CHECK-SAME: (%[[ARG0:.+]]: tensor<?x42xf32>, %{{.*}}, %{{.*}})
+// CHECK: %[[C0:.+]] = arith.constant 0
+// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]]
+// CHECK: %[[ALLOC:.+]] = bufferization.alloc_tensor(%[[DIM]]) {memory_space = 1 : i64}
+// CHECK: %[[MAT:.+]] = bufferization.materialize_in_destination %[[ARG0]] in %[[ALLOC]]
+// CHECK: linalg.matmul ins(%[[MAT]], %{{.*}}
+func.func @promote_in0(%arg0: tensor<?x42xf32>, %arg1: tensor<42x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<?x42xf32>, tensor<42x?xf32>)
+ outs(%arg2: tensor<?x?xf32>) -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ %mm = transform.structured.match ops{["linalg.matmul"]} in %root
+ : (!transform.any_op) -> !transform.any_op
+ %op0 = transform.get_operand %mm[0]
+ : (!transform.any_op) -> !transform.any_value
+ transform.structured.promote_tensor to 1 %op0 : !transform.any_value
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: @promote_out
+// CHECK-SAME: (%{{.*}}: tensor<?x42xf32>, %{{.*}}: tensor<?x42xf32>, %[[ARG2:.+]]: tensor<?x?xf32>)
+func.func @promote_out(%arg0: tensor<?x42xf32>, %arg1: tensor<?x42xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ // CHECK: %[[C0:.+]] = arith.constant 0
+ // CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG2]], %[[C0]]
+ // CHECK: %[[C1:.+]] = arith.constant 1
+ // CHECK: %[[DIM1:.+]] = tensor.dim %[[ARG2]], %[[C1]]
+ // CHECK: %[[ALLOC:.+]] = bufferization.alloc_tensor(%[[DIM0]], %[[DIM1]]) {memory_space = 1 : i64}
+ // CHECK-NOT: materialize_in_destination
+ // CHECK: linalg.add {{.*}} outs(%[[ALLOC]]
+ %0 = linalg.add ins(%arg0, %arg1 : tensor<?x42xf32>, tensor<?x42xf32>)
+ outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ %la = transform.structured.match ops{["linalg.add"]} in %root
+ : (!transform.any_op) -> !transform.any_op
+ %init = transform.get_operand %la[2]
+ : (!transform.any_op) -> !transform.any_value
+ transform.structured.promote_tensor to 1 %init : !transform.any_value
+
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: @promote_in0_out_bufferize
+// CHECK-SAME: (%[[ARG0:.+]]: tensor<?x42xf32>, %{{.*}}: tensor<42x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>)
+func.func @promote_in0_out_bufferize(%arg0: tensor<?x42xf32>, %arg1: tensor<42x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ // CHECK: %[[IN1:.+]] = bufferization.to_buffer %arg1 : tensor<42x?xf32> to memref<42x?xf32, strided<[?, ?], offset: ?>>
+ // CHECK: %[[IN0:.+]] = bufferization.to_buffer %arg0 : tensor<?x42xf32> to memref<?x42xf32, strided<[?, ?], offset: ?>>
+ // CHECK: %{{.+}} = bufferization.to_buffer %arg0 : tensor<?x42xf32> to memref<?x42xf32, strided<[?, ?], offset: ?>>
+ // CHECK: %{{.+}} = bufferization.to_buffer %arg2 : tensor<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+ // CHECK: %{{.+}} = bufferization.to_buffer %arg2 : tensor<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+ // CHECK: %[[C0:.+]] = arith.constant 0 : index
+ // CHECK: %{{.+}} = memref.dim %{{.+}}, %[[C0]] : memref<?x?xf32, strided<[?, ?], offset: ?>>
+ // CHECK: %[[C1:.+]] = arith.constant 1 : index
+ // CHECK: %{{.+}} = memref.dim %{{.+}}, %[[C1]] : memref<?x?xf32, strided<[?, ?], offset: ?>>
+ // CHECK: %[[ALLOC_OUT:.+]] = memref.alloc(%{{.+}}, %{{.+}}) {alignment = 64 : i64} : memref<?x?xf32, 1>
+ // CHECK: %{{.+}} = arith.constant 0 : index
+ // CHECK: %{{.+}} = memref.dim %{{.+}}, %{{.+}} : memref<?x42xf32, strided<[?, ?], offset: ?>>
+ // CHECK: %[[ALLOC_IN:.+]] = memref.alloc(%{{.+}}) {alignment = 64 : i64} : memref<?x42xf32, 1>
+ // CHECK: memref.copy %[[IN0]], %[[ALLOC_IN]] : memref<?x42xf32, strided<[?, ?], offset: ?>> to memref<?x42xf32, 1>
+ // CHECK: linalg.add ins(%[[ALLOC_IN]], %[[IN1]] : memref<?x42xf32, 1>, memref<42x?xf32, strided<[?, ?], offset: ?>>) outs(%[[ALLOC_OUT]] : memref<?x?xf32, 1>)
+ %0 = linalg.add ins(%arg0, %arg1: tensor<?x42xf32>, tensor<42x?xf32>)
+ outs(%arg2: tensor<?x?xf32>) -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ %la = transform.structured.match ops{["linalg.add"]} in %root
+ : (!transform.any_op) -> !transform.any_op
+ %op0 = transform.get_operand %la[0]
+ : (!transform.any_op) -> !transform.any_value
+ transform.structured.promote_tensor to 1 %op0 : !transform.any_value
+
+ %init = transform.get_operand %la[2]
+ : (!transform.any_op) -> !transform.any_value
+ transform.structured.promote_tensor to 1 %init : !transform.any_value
+
+ %func = transform.structured.match ops{["func.func"]} in %root
+ : (!transform.any_op) -> !transform.any_op
+
+ %bufferized = transform.bufferization.one_shot_bufferize %func
+ : (!transform.any_op) -> !transform.any_op
+
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir b/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir
index 2e5f433..efc3890 100644
--- a/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir
+++ b/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir
@@ -19,3 +19,88 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
+
+// -----
+
+func.func private @f()
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ // expected-error@below {{'selected_region' attribute specifies region at index 2 while op has only 2 regions}}
+ transform.tune.alternatives<"bifurcation"> selected_region = 2 {
+ transform.yield
+ }, {
+ transform.yield
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+func.func private @f()
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %singleton_of_c0 = transform.param.constant [0] -> !transform.any_param
+ // expected-error@below {{param should hold exactly one integer attribute, got: [0]}}
+ transform.tune.alternatives<"bifurcation"> selected_region = %singleton_of_c0 : !transform.any_param {
+ transform.yield
+ }, {
+ transform.yield
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+func.func private @f()
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %c0 = transform.param.constant 0 -> !transform.any_param
+ %c1 = transform.param.constant 1 -> !transform.any_param
+ %c0_and_c1 = transform.merge_handles %c0, %c1 : !transform.any_param
+ // expected-error@below {{param should hold exactly one integer attribute}}
+ transform.tune.alternatives<"bifurcation"> selected_region = %c0_and_c1 : !transform.any_param {
+ transform.yield
+ }, {
+ transform.yield
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+func.func private @f()
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %c2 = transform.param.constant 2 -> !transform.any_param
+ // expected-error@below {{'selected_region' attribute/param specifies region at index 2 while op has only 2 regions}}
+ transform.tune.alternatives<"bifurcation"> selected_region = %c2 : !transform.any_param {
+ transform.yield
+ }, {
+ transform.yield
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+func.func private @f()
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ // expected-error@below {{non-deterministic choice "bifurcation" is only resolved through providing a `selected_region` attr/param}}
+ transform.tune.alternatives<"bifurcation"> {
+ transform.yield
+ }, {
+ transform.yield
+ }
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Transform/test-tune-extension.mlir b/mlir/test/Dialect/Transform/test-tune-extension.mlir
index 0a253c6..5da48a2 100644
--- a/mlir/test/Dialect/Transform/test-tune-extension.mlir
+++ b/mlir/test/Dialect/Transform/test-tune-extension.mlir
@@ -59,3 +59,129 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
+
+
+// -----
+
+// CHECK-LABEL: schedule_with_two_independent_choices_already_made
+func.func @schedule_with_two_independent_choices_already_made(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32> {
+// CHECK-NOT: scf.forall
+// CHECK: scf.for
+// CHECK-NOT: scf.for
+// CHECK: scf.forall
+// CHECK-NOT: scf.for
+// CHECK: tensor.extract_slice
+// CHECK: tensor.extract_slice
+// CHECK: tensor.extract_slice
+// CHECK: linalg.matmul
+// CHECK: scf.forall.in_parallel
+// CHECK: tensor.parallel_insert_slice
+// CHECK: tensor.insert_slice
+// CHECK: scf.yield
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>) -> tensor<128x128xf32>
+ return %0 : tensor<128x128xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+
+ %tiled_matmul = transform.tune.alternatives<"outer_par_or_seq_tiling"> selected_region = 0 -> !transform.any_op
+ { // First alternative/region, with index = 0
+ %contained_matmul, %loop = transform.structured.tile_using_for %matmul tile_sizes [8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield %contained_matmul : !transform.any_op
+ }, { // Second alternative/region, with index = 1
+ %contained_matmul, %loop = transform.structured.tile_using_forall %matmul tile_sizes [8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield %contained_matmul : !transform.any_op
+ }
+
+ transform.tune.alternatives<"inner_par_or_seq_tiling"> selected_region = 1 -> !transform.any_op {
+ %contained_matmul, %loop = transform.structured.tile_using_for %tiled_matmul tile_sizes [0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield %contained_matmul : !transform.any_op
+ }, {
+ %contained_matmul, %loop = transform.structured.tile_using_forall %tiled_matmul tile_sizes [0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield %contained_matmul : !transform.any_op
+ }
+
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: subschedule_with_choice_resolved_in_main_schedule
+func.func @subschedule_with_choice_resolved_in_main_schedule(
+ %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>)
+ -> tensor<128x128xf32> {
+// CHECK-NOT: scf.for
+// CHECK: scf.forall
+// CHECK-NOT: scf.forall
+// CHECK: scf.for
+// CHECK-NOT: scf.forall
+// CHECK: tensor.extract_slice
+// CHECK: tensor.extract_slice
+// CHECK: tensor.extract_slice
+// CHECK: linalg.matmul
+// CHECK: tensor.insert_slice
+// CHECK: scf.yield
+// CHECK: scf.forall.in_parallel
+// CHECK: tensor.parallel_insert_slice
+ %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
+ outs(%arg2: tensor<128x128xf32>) -> tensor<128x128xf32>
+ return %0 : tensor<128x128xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @subschedule_with_embedded_choice(%matmul: !transform.any_op {transform.readonly},
+ %par_or_seq: !transform.param<i64> {transform.readonly},
+ %tile_size: !transform.param<i64> {transform.readonly}) -> !transform.any_op {
+ %tiled_matmul = transform.tune.alternatives<"par_or_seq_tiling"> selected_region = %par_or_seq : !transform.param<i64> -> !transform.any_op {
+ %contained_matmul, %loop = transform.structured.tile_using_for %matmul tile_sizes [%tile_size] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+ transform.yield %contained_matmul : !transform.any_op
+ }, {
+ %contained_matmul, %loop = transform.structured.tile_using_forall %matmul tile_sizes [%tile_size] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+ transform.yield %contained_matmul : !transform.any_op
+ }
+ transform.yield %tiled_matmul : !transform.any_op
+ }
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %outer_par = transform.param.constant 1 -> !transform.param<i64>
+ %outer_tile_size = transform.param.constant 32 -> !transform.param<i64>
+ %inner_seq = transform.tune.knob<"inner_par_or_seq"> = 0 from options = [0, 1] -> !transform.param<i64>
+ %inner_tile_size = transform.param.constant 8 -> !transform.param<i64>
+ %tiled_matmul = transform.include @subschedule_with_embedded_choice failures(propagate) (%matmul, %outer_par, %outer_tile_size) : (!transform.any_op, !transform.param<i64>, !transform.param<i64>) -> !transform.any_op
+ %tiled_tiled_matmul = transform.include @subschedule_with_embedded_choice failures(propagate) (%tiled_matmul, %inner_seq, %inner_tile_size) : (!transform.any_op, !transform.param<i64>, !transform.param<i64>) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK-LABEL: eeny_meeny_miny_moe
+func.func private @eeny_meeny_miny_moe()
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+
+ %tiled_matmul = transform.tune.alternatives<"4way"> selected_region = 3 -> !transform.any_param
+ { // First alternative/region, with index = 0
+ %out = transform.param.constant "eeny" -> !transform.any_param
+ transform.yield %out : !transform.any_param
+ }, { // Second alternative/region, with index = 1
+ %out = transform.param.constant "meeny" -> !transform.any_param
+ transform.yield %out : !transform.any_param
+ }, { // Third alternative/region, with index = 2
+ %out = transform.param.constant "miny" -> !transform.any_param
+ transform.yield %out : !transform.any_param
+ }, { // Fourth alternative/region, with index = 3
+ %out = transform.param.constant "moe" -> !transform.any_param
+ transform.yield %out : !transform.any_param
+ }
+ transform.yield
+ }
+}
\ No newline at end of file
diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir
index 03c6386..38392fd 100644
--- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir
+++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir
@@ -282,15 +282,20 @@ gpu.module @test_distribution {
// CHECK-LABEL: @store_scatter
// CHECK-SAME: %[[ARG0:.*]]: memref<256xf16>
gpu.func @store_scatter(%dest : memref<256xf16>) {
- // CHECK: %[[VAL:.*]] = arith.constant dense<2.550000e+01> : vector<8xf16>
- // CHECK: %[[CST:.*]] = arith.constant dense<0> : vector<8xindex>
- // CHECK: %[[MASK:.*]] = arith.constant dense<true> : vector<8xi1>
+ // CHECK: %[[VAL:.*]] = arith.constant {layout_result_0 = #xegpu.layout<inst_data = [8]>} dense<2.550000e+01> : vector<8xf16>
+ // CHECK: %[[CST:.*]] = arith.constant {layout_result_0 = #xegpu.layout<inst_data = [8]>} dense<0> : vector<8xindex>
+ // CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout<inst_data = [8]>} dense<true> : vector<8xi1>
// CHECK: xegpu.store %[[VAL]], %[[ARG0]][%[[CST]]], %[[MASK]] <{chunk_size = 1 : i64, l1_hint = #xegpu.cache_hint<cached>}>
+ // CHECK-SAME: {layout_operand_0 = #xegpu.layout<inst_data = [8]>, layout_operand_2 = #xegpu.layout<inst_data = [8]>,
+ // CHECK-SAME: layout_operand_3 = #xegpu.layout<inst_data = [8]>}
// CHECK-SAME: : vector<8xf16>, memref<256xf16>, vector<8xindex>, vector<8xi1>
- %val = arith.constant {layout_result_0 = #xegpu.layout<sg_layout = [32], sg_data = [8]>} dense<25.5> : vector<256xf16>
- %offset = arith.constant {layout_result_0 = #xegpu.layout<sg_layout = [32], sg_data = [8]>} dense<0> : vector<256xindex>
- %mask = arith.constant {layout_result_0 = #xegpu.layout<sg_layout = [32], sg_data = [8]>} dense<1> : vector<256xi1>
- xegpu.store %val, %dest[%offset], %mask {chunk_size = 1, layout = #xegpu.layout<sg_layout = [32], sg_data = [8]>, l1_hint = #xegpu.cache_hint<cached>}
+ %val = arith.constant {layout_result_0 = #xegpu.layout<sg_layout = [32], sg_data = [8], inst_data = [8]>} dense<25.5> : vector<256xf16>
+ %offset = arith.constant {layout_result_0 = #xegpu.layout<sg_layout = [32], sg_data = [8], inst_data = [8]>} dense<0> : vector<256xindex>
+ %mask = arith.constant {layout_result_0 = #xegpu.layout<sg_layout = [32], sg_data = [8], inst_data = [8]>} dense<1> : vector<256xi1>
+ xegpu.store %val, %dest[%offset], %mask {chunk_size = 1, layout_operand_0 = #xegpu.layout<sg_layout = [32], sg_data = [8], inst_data = [8]>,
+ layout_operand_2 = #xegpu.layout<sg_layout = [32], sg_data = [8], inst_data = [8]>,
+ layout_operand_3 = #xegpu.layout<sg_layout = [32], sg_data = [8], inst_data = [8]>,
+ l1_hint = #xegpu.cache_hint<cached>}
: vector<256xf16>, memref<256xf16>, vector<256xindex>, vector<256xi1>
gpu.return
}
diff --git a/mlir/test/Target/LLVMIR/openmp-cli-tile01.mlir b/mlir/test/Target/LLVMIR/openmp-cli-tile01.mlir
new file mode 100644
index 0000000..4ac4f02
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/openmp-cli-tile01.mlir
@@ -0,0 +1,101 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+
+
+llvm.func @tile_trivial_loop(%baseptr: !llvm.ptr, %tc: i32, %ts: i32) -> () {
+ %literal_cli = omp.new_cli
+ omp.canonical_loop(%literal_cli) %iv : i32 in range(%tc) {
+ %ptr = llvm.getelementptr inbounds %baseptr[%iv] : (!llvm.ptr, i32) -> !llvm.ptr, f32
+ %val = llvm.mlir.constant(42.0 : f32) : f32
+ llvm.store %val, %ptr : f32, !llvm.ptr
+ omp.terminator
+ }
+ omp.tile <- (%literal_cli) sizes(%ts : i32)
+ llvm.return
+}
+
+
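+// Tiling one canonical loop yields a "floor" loop over tiles wrapping a "tile"
+// loop within each tile: the floor trip count is ceil(%tc / %ts), computed via
+// the udiv/urem/zext sequence below, and the select in omp_floor0.body shrinks
+// the last, possibly partial, tile.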
+// CHECK: ; ModuleID = 'LLVMDialectModule'
+// CHECK-NEXT: source_filename = "LLVMDialectModule"
+// CHECK-EMPTY:
+// CHECK-NEXT: define void @tile_trivial_loop(ptr %0, i32 %1, i32 %2) {
+// CHECK-NEXT: br label %omp_omp.loop.preheader
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.preheader: ; preds = %3
+// CHECK-NEXT: %4 = udiv i32 %1, %2
+// CHECK-NEXT: %5 = urem i32 %1, %2
+// CHECK-NEXT: %6 = icmp ne i32 %5, 0
+// CHECK-NEXT: %7 = zext i1 %6 to i32
+// CHECK-NEXT: %omp_floor0.tripcount = add nuw i32 %4, %7
+// CHECK-NEXT: br label %omp_floor0.preheader
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.preheader: ; preds = %omp_omp.loop.preheader
+// CHECK-NEXT: br label %omp_floor0.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.header: ; preds = %omp_floor0.inc, %omp_floor0.preheader
+// CHECK-NEXT: %omp_floor0.iv = phi i32 [ 0, %omp_floor0.preheader ], [ %omp_floor0.next, %omp_floor0.inc ]
+// CHECK-NEXT: br label %omp_floor0.cond
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.cond: ; preds = %omp_floor0.header
+// CHECK-NEXT: %omp_floor0.cmp = icmp ult i32 %omp_floor0.iv, %omp_floor0.tripcount
+// CHECK-NEXT: br i1 %omp_floor0.cmp, label %omp_floor0.body, label %omp_floor0.exit
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.body: ; preds = %omp_floor0.cond
+// CHECK-NEXT: %8 = icmp eq i32 %omp_floor0.iv, %4
+// CHECK-NEXT: %9 = select i1 %8, i32 %5, i32 %2
+// CHECK-NEXT: br label %omp_tile0.preheader
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.preheader: ; preds = %omp_floor0.body
+// CHECK-NEXT: br label %omp_tile0.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.header: ; preds = %omp_tile0.inc, %omp_tile0.preheader
+// CHECK-NEXT: %omp_tile0.iv = phi i32 [ 0, %omp_tile0.preheader ], [ %omp_tile0.next, %omp_tile0.inc ]
+// CHECK-NEXT: br label %omp_tile0.cond
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.cond: ; preds = %omp_tile0.header
+// CHECK-NEXT: %omp_tile0.cmp = icmp ult i32 %omp_tile0.iv, %9
+// CHECK-NEXT: br i1 %omp_tile0.cmp, label %omp_tile0.body, label %omp_tile0.exit
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.body: ; preds = %omp_tile0.cond
+// CHECK-NEXT: %10 = mul nuw i32 %2, %omp_floor0.iv
+// CHECK-NEXT: %11 = add nuw i32 %10, %omp_tile0.iv
+// CHECK-NEXT: br label %omp_omp.loop.body
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.body: ; preds = %omp_tile0.body
+// CHECK-NEXT: br label %omp.loop.region
+// CHECK-EMPTY:
+// CHECK-NEXT: omp.loop.region: ; preds = %omp_omp.loop.body
+// CHECK-NEXT: %12 = getelementptr inbounds float, ptr %0, i32 %11
+// CHECK-NEXT: store float 4.200000e+01, ptr %12, align 4
+// CHECK-NEXT: br label %omp.region.cont
+// CHECK-EMPTY:
+// CHECK-NEXT: omp.region.cont: ; preds = %omp.loop.region
+// CHECK-NEXT: br label %omp_tile0.inc
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.inc: ; preds = %omp.region.cont
+// CHECK-NEXT: %omp_tile0.next = add nuw i32 %omp_tile0.iv, 1
+// CHECK-NEXT: br label %omp_tile0.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.exit: ; preds = %omp_tile0.cond
+// CHECK-NEXT: br label %omp_tile0.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.after: ; preds = %omp_tile0.exit
+// CHECK-NEXT: br label %omp_floor0.inc
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.inc: ; preds = %omp_tile0.after
+// CHECK-NEXT: %omp_floor0.next = add nuw i32 %omp_floor0.iv, 1
+// CHECK-NEXT: br label %omp_floor0.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.exit: ; preds = %omp_floor0.cond
+// CHECK-NEXT: br label %omp_floor0.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.after: ; preds = %omp_floor0.exit
+// CHECK-NEXT: br label %omp_omp.loop.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.after: ; preds = %omp_floor0.after
+// CHECK-NEXT: ret void
+// CHECK-NEXT: }
+// CHECK-EMPTY:
+// CHECK-NEXT: !llvm.module.flags = !{!0}
+// CHECK-EMPTY:
+// CHECK-NEXT: !0 = !{i32 2, !"Debug Info Version", i32 3}
diff --git a/mlir/test/Target/LLVMIR/openmp-cli-tile02.mlir b/mlir/test/Target/LLVMIR/openmp-cli-tile02.mlir
new file mode 100644
index 0000000..6fad81c
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/openmp-cli-tile02.mlir
@@ -0,0 +1,190 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+
+llvm.func @tile_2d_loop(%baseptr: !llvm.ptr, %tc1: i32, %tc2: i32, %ts1: i32, %ts2: i32) -> () {
+ %literal_outer = omp.new_cli
+ %literal_inner = omp.new_cli
+ omp.canonical_loop(%literal_outer) %iv1 : i32 in range(%tc1) {
+ omp.canonical_loop(%literal_inner) %iv2 : i32 in range(%tc2) {
+ %idx = llvm.add %iv1, %iv2 : i32
+ %ptr = llvm.getelementptr inbounds %baseptr[%idx] : (!llvm.ptr, i32) -> !llvm.ptr, f32
+ %val = llvm.mlir.constant(42.0 : f32) : f32
+ llvm.store %val, %ptr : f32, !llvm.ptr
+ omp.terminator
+ }
+ omp.terminator
+ }
+ omp.tile <- (%literal_outer, %literal_inner) sizes(%ts1, %ts2 : i32, i32)
+ llvm.return
+}
+
+
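+// With two tiled dimensions, both floor loops (omp_floor0, omp_floor1) are
+// emitted outermost, followed by both tile loops (omp_tile0, omp_tile1); the
+// original loop body lands in the innermost tile loop.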
+// CHECK: ; ModuleID = 'LLVMDialectModule'
+// CHECK-NEXT: source_filename = "LLVMDialectModule"
+// CHECK-EMPTY:
+// CHECK-NEXT: define void @tile_2d_loop(ptr %0, i32 %1, i32 %2, i32 %3, i32 %4) {
+// CHECK-NEXT: br label %omp_omp.loop.preheader
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.preheader: ; preds = %5
+// CHECK-NEXT: %6 = udiv i32 %1, %3
+// CHECK-NEXT: %7 = urem i32 %1, %3
+// CHECK-NEXT: %8 = icmp ne i32 %7, 0
+// CHECK-NEXT: %9 = zext i1 %8 to i32
+// CHECK-NEXT: %omp_floor0.tripcount = add nuw i32 %6, %9
+// CHECK-NEXT: %10 = udiv i32 %2, %4
+// CHECK-NEXT: %11 = urem i32 %2, %4
+// CHECK-NEXT: %12 = icmp ne i32 %11, 0
+// CHECK-NEXT: %13 = zext i1 %12 to i32
+// CHECK-NEXT: %omp_floor1.tripcount = add nuw i32 %10, %13
+// CHECK-NEXT: br label %omp_floor0.preheader
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.header: ; preds = %omp_omp.loop.inc
+// CHECK-NEXT: %omp_omp.loop.iv = phi i32 [ %omp_omp.loop.next, %omp_omp.loop.inc ]
+// CHECK-NEXT: br label %omp_omp.loop.cond
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.cond: ; preds = %omp_omp.loop.header
+// CHECK-NEXT: %omp_omp.loop.cmp = icmp ult i32 %19, %1
+// CHECK-NEXT: br i1 %omp_omp.loop.cmp, label %omp_omp.loop.body, label %omp_omp.loop.exit
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.body: ; preds = %omp_tile1.body, %omp_omp.loop.cond
+// CHECK-NEXT: br label %omp.loop.region
+// CHECK-EMPTY:
+// CHECK-NEXT: omp.loop.region: ; preds = %omp_omp.loop.body
+// CHECK-NEXT: br label %omp_omp.loop.preheader1
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.preheader1: ; preds = %omp.loop.region
+// CHECK-NEXT: br label %omp_omp.loop.body4
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.preheader: ; preds = %omp_omp.loop.preheader
+// CHECK-NEXT: br label %omp_floor0.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.header: ; preds = %omp_floor0.inc, %omp_floor0.preheader
+// CHECK-NEXT: %omp_floor0.iv = phi i32 [ 0, %omp_floor0.preheader ], [ %omp_floor0.next, %omp_floor0.inc ]
+// CHECK-NEXT: br label %omp_floor0.cond
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.cond: ; preds = %omp_floor0.header
+// CHECK-NEXT: %omp_floor0.cmp = icmp ult i32 %omp_floor0.iv, %omp_floor0.tripcount
+// CHECK-NEXT: br i1 %omp_floor0.cmp, label %omp_floor0.body, label %omp_floor0.exit
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.body: ; preds = %omp_floor0.cond
+// CHECK-NEXT: br label %omp_floor1.preheader
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor1.preheader: ; preds = %omp_floor0.body
+// CHECK-NEXT: br label %omp_floor1.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor1.header: ; preds = %omp_floor1.inc, %omp_floor1.preheader
+// CHECK-NEXT: %omp_floor1.iv = phi i32 [ 0, %omp_floor1.preheader ], [ %omp_floor1.next, %omp_floor1.inc ]
+// CHECK-NEXT: br label %omp_floor1.cond
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor1.cond: ; preds = %omp_floor1.header
+// CHECK-NEXT: %omp_floor1.cmp = icmp ult i32 %omp_floor1.iv, %omp_floor1.tripcount
+// CHECK-NEXT: br i1 %omp_floor1.cmp, label %omp_floor1.body, label %omp_floor1.exit
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor1.body: ; preds = %omp_floor1.cond
+// CHECK-NEXT: %14 = icmp eq i32 %omp_floor0.iv, %6
+// CHECK-NEXT: %15 = select i1 %14, i32 %7, i32 %3
+// CHECK-NEXT: %16 = icmp eq i32 %omp_floor1.iv, %10
+// CHECK-NEXT: %17 = select i1 %16, i32 %11, i32 %4
+// CHECK-NEXT: br label %omp_tile0.preheader
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.preheader: ; preds = %omp_floor1.body
+// CHECK-NEXT: br label %omp_tile0.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.header: ; preds = %omp_tile0.inc, %omp_tile0.preheader
+// CHECK-NEXT: %omp_tile0.iv = phi i32 [ 0, %omp_tile0.preheader ], [ %omp_tile0.next, %omp_tile0.inc ]
+// CHECK-NEXT: br label %omp_tile0.cond
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.cond: ; preds = %omp_tile0.header
+// CHECK-NEXT: %omp_tile0.cmp = icmp ult i32 %omp_tile0.iv, %15
+// CHECK-NEXT: br i1 %omp_tile0.cmp, label %omp_tile0.body, label %omp_tile0.exit
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.body: ; preds = %omp_tile0.cond
+// CHECK-NEXT: br label %omp_tile1.preheader
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile1.preheader: ; preds = %omp_tile0.body
+// CHECK-NEXT: br label %omp_tile1.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile1.header: ; preds = %omp_tile1.inc, %omp_tile1.preheader
+// CHECK-NEXT: %omp_tile1.iv = phi i32 [ 0, %omp_tile1.preheader ], [ %omp_tile1.next, %omp_tile1.inc ]
+// CHECK-NEXT: br label %omp_tile1.cond
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile1.cond: ; preds = %omp_tile1.header
+// CHECK-NEXT: %omp_tile1.cmp = icmp ult i32 %omp_tile1.iv, %17
+// CHECK-NEXT: br i1 %omp_tile1.cmp, label %omp_tile1.body, label %omp_tile1.exit
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile1.body: ; preds = %omp_tile1.cond
+// CHECK-NEXT: %18 = mul nuw i32 %3, %omp_floor0.iv
+// CHECK-NEXT: %19 = add nuw i32 %18, %omp_tile0.iv
+// CHECK-NEXT: %20 = mul nuw i32 %4, %omp_floor1.iv
+// CHECK-NEXT: %21 = add nuw i32 %20, %omp_tile1.iv
+// CHECK-NEXT: br label %omp_omp.loop.body
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.body4: ; preds = %omp_omp.loop.preheader1
+// CHECK-NEXT: br label %omp.loop.region12
+// CHECK-EMPTY:
+// CHECK-NEXT: omp.loop.region12: ; preds = %omp_omp.loop.body4
+// CHECK-NEXT: %22 = add i32 %19, %21
+// CHECK-NEXT: %23 = getelementptr inbounds float, ptr %0, i32 %22
+// CHECK-NEXT: store float 4.200000e+01, ptr %23, align 4
+// CHECK-NEXT: br label %omp.region.cont11
+// CHECK-EMPTY:
+// CHECK-NEXT: omp.region.cont11: ; preds = %omp.loop.region12
+// CHECK-NEXT: br label %omp_tile1.inc
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile1.inc: ; preds = %omp.region.cont11
+// CHECK-NEXT: %omp_tile1.next = add nuw i32 %omp_tile1.iv, 1
+// CHECK-NEXT: br label %omp_tile1.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile1.exit: ; preds = %omp_tile1.cond
+// CHECK-NEXT: br label %omp_tile1.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile1.after: ; preds = %omp_tile1.exit
+// CHECK-NEXT: br label %omp_tile0.inc
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.inc: ; preds = %omp_tile1.after
+// CHECK-NEXT: %omp_tile0.next = add nuw i32 %omp_tile0.iv, 1
+// CHECK-NEXT: br label %omp_tile0.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.exit: ; preds = %omp_tile0.cond
+// CHECK-NEXT: br label %omp_tile0.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_tile0.after: ; preds = %omp_tile0.exit
+// CHECK-NEXT: br label %omp_floor1.inc
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor1.inc: ; preds = %omp_tile0.after
+// CHECK-NEXT: %omp_floor1.next = add nuw i32 %omp_floor1.iv, 1
+// CHECK-NEXT: br label %omp_floor1.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor1.exit: ; preds = %omp_floor1.cond
+// CHECK-NEXT: br label %omp_floor1.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor1.after: ; preds = %omp_floor1.exit
+// CHECK-NEXT: br label %omp_floor0.inc
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.inc: ; preds = %omp_floor1.after
+// CHECK-NEXT: %omp_floor0.next = add nuw i32 %omp_floor0.iv, 1
+// CHECK-NEXT: br label %omp_floor0.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.exit: ; preds = %omp_floor0.cond
+// CHECK-NEXT: br label %omp_floor0.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_floor0.after: ; preds = %omp_floor0.exit
+// CHECK-NEXT: br label %omp_omp.loop.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp.region.cont: ; No predecessors!
+// CHECK-NEXT: br label %omp_omp.loop.inc
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.inc: ; preds = %omp.region.cont
+// CHECK-NEXT: %omp_omp.loop.next = add nuw i32 %19, 1
+// CHECK-NEXT: br label %omp_omp.loop.header
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.exit: ; preds = %omp_omp.loop.cond
+// CHECK-NEXT: br label %omp_omp.loop.after
+// CHECK-EMPTY:
+// CHECK-NEXT: omp_omp.loop.after: ; preds = %omp_floor0.after, %omp_omp.loop.exit
+// CHECK-NEXT: ret void
+// CHECK-NEXT: }
+// CHECK-EMPTY:
+// CHECK-NEXT: !llvm.module.flags = !{!0}
+// CHECK-EMPTY:
+// CHECK-NEXT: !0 = !{i32 2, !"Debug Info Version", i32 3}
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir
index e043a8c..00ee6b7 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -1340,6 +1340,34 @@ llvm.func @rocdl.cvt.scale.pk8(%i32: i32, %v2xi32: vector<2xi32>, %scale: i32) {
llvm.return
}
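+
+// The scaled pk8 conversions pack eight narrow results: the fp8/bf8 variants
+// return vector<2xi32> (8 bytes), the fp4 variants a single i32 (8 nibbles).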
+// CHECK-LABEL: rocdl.cvt.scalef32.pk8
+// CHECK-SAME:(<8 x float> %[[V8F32:.+]], <8 x half> %[[V8F16:.+]], <8 x bfloat> %[[V8BF16:.+]], float %[[SCALE:.+]])
+llvm.func @rocdl.cvt.scalef32.pk8(%v8xf32: vector<8xf32>, %v8xf16: vector<8xf16>, %v8xbf16: vector<8xbf16>, %scale: f32) {
+
+ // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f32(<8 x float> %[[V8F32]], float %[[SCALE]])
+ %0 = rocdl.cvt.scalef32.pk8.fp8.f32 %v8xf32, %scale : vector<2xi32>
+ // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f32(<8 x float> %[[V8F32]], float %[[SCALE]])
+ %1 = rocdl.cvt.scalef32.pk8.bf8.f32 %v8xf32, %scale : vector<2xi32>
+ // CHECK: call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f32(<8 x float> %[[V8F32]], float %[[SCALE]])
+ %2 = rocdl.cvt.scalef32.pk8.fp4.f32 %v8xf32, %scale : i32
+
+ // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f16(<8 x half> %[[V8F16]], float %[[SCALE]])
+ %3 = rocdl.cvt.scalef32.pk8.fp8.f16 %v8xf16, %scale : vector<2xi32>
+ // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f16(<8 x half> %[[V8F16]], float %[[SCALE]])
+ %4 = rocdl.cvt.scalef32.pk8.bf8.f16 %v8xf16, %scale : vector<2xi32>
+ // CHECK: call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f16(<8 x half> %[[V8F16]], float %[[SCALE]])
+ %5 = rocdl.cvt.scalef32.pk8.fp4.f16 %v8xf16, %scale : i32
+
+ // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.bf16(<8 x bfloat> %[[V8BF16]], float %[[SCALE]])
+ %6 = rocdl.cvt.scalef32.pk8.fp8.bf16 %v8xbf16, %scale : vector<2xi32>
+ // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.bf16(<8 x bfloat> %[[V8BF16]], float %[[SCALE]])
+ %7 = rocdl.cvt.scalef32.pk8.bf8.bf16 %v8xbf16, %scale : vector<2xi32>
+ // CHECK: call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.bf16(<8 x bfloat> %[[V8BF16]], float %[[SCALE]])
+ %8 = rocdl.cvt.scalef32.pk8.fp4.bf16 %v8xbf16, %scale : i32
+
+ llvm.return
+}
+
// CHECK-LABEL: @rocdl.cvt.scale.pk16
// CHECK-SAME:(<3 x i32> %[[SRC0:.+]], i32 %[[SCALE:.+]])
llvm.func @rocdl.cvt.scale.pk16(%v3xi32: vector<3xi32>, %scale:i32) {
diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt b/mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt
index 103bc94..7d32577 100644
--- a/mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt
+++ b/mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt
@@ -12,5 +12,7 @@ add_mlir_library(MLIRTestIRDLToCppDialect
mlir_target_link_libraries(MLIRTestIRDLToCppDialect PUBLIC
MLIRIR
MLIRPass
+ MLIRSCFDialect
MLIRTransforms
+ MLIRTestDialect
)
diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp b/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp
index 9550e4c..421db7e 100644
--- a/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp
+++ b/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp
@@ -13,6 +13,7 @@
// #include "mlir/IR/Dialect.h"
#include "mlir/IR/Region.h"
+#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
@@ -54,16 +55,34 @@ struct TestOpConversion : public OpConversionPattern<test_irdl_to_cpp::BeefOp> {
}
};
+struct TestRegionConversion
+ : public OpConversionPattern<test_irdl_to_cpp::ConditionalOp> {
+ using OpConversionPattern::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(mlir::test_irdl_to_cpp::ConditionalOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ // Just exercising the C++ API even though these are not enforced in the
+ // dialect definition
+ assert(op.getThen().getBlocks().size() == 1);
+ assert(adaptor.getElse().getBlocks().size() == 1);
+ auto ifOp = scf::IfOp::create(rewriter, op.getLoc(), op.getInput());
+ rewriter.replaceOp(op, ifOp);
+ return success();
+ }
+};
+
struct ConvertTestDialectToSomethingPass
: PassWrapper<ConvertTestDialectToSomethingPass, OperationPass<ModuleOp>> {
void runOnOperation() override {
MLIRContext *ctx = &getContext();
RewritePatternSet patterns(ctx);
- patterns.add<TestOpConversion>(ctx);
+ patterns.add<TestOpConversion, TestRegionConversion>(ctx);
ConversionTarget target(getContext());
- target.addIllegalOp<test_irdl_to_cpp::BeefOp>();
- target.addLegalOp<test_irdl_to_cpp::BarOp>();
- target.addLegalOp<test_irdl_to_cpp::HashOp>();
+ target.addIllegalOp<test_irdl_to_cpp::BeefOp,
+ test_irdl_to_cpp::ConditionalOp>();
+ target.addLegalOp<test_irdl_to_cpp::BarOp, test_irdl_to_cpp::HashOp,
+ scf::IfOp, scf::YieldOp>();
if (failed(applyPartialConversion(getOperation(), target,
std::move(patterns))))
signalPassFailure();
@@ -73,6 +92,10 @@ struct ConvertTestDialectToSomethingPass
StringRef getDescription() const final {
return "Checks the convertability of an irdl dialect";
}
+
+ void getDependentDialects(DialectRegistry &registry) const override {
+ registry.insert<scf::SCFDialect>();
+ }
};
void registerIrdlTestDialect(mlir::DialectRegistry &registry) {
diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir b/mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir
index f6233ee..1915324 100644
--- a/mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir
+++ b/mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir
@@ -1,15 +1,29 @@
// RUN: mlir-opt %s --pass-pipeline="builtin.module(test-irdl-conversion-check)" | FileCheck %s
// CHECK-LABEL: module {
module {
- // CHECK: func.func @test() {
+ // CHECK: func.func @test(%[[test_arg:[^ ]*]]: i1) {
// CHECK: %[[v0:[^ ]*]] = "test_irdl_to_cpp.bar"() : () -> i32
// CHECK: %[[v1:[^ ]*]] = "test_irdl_to_cpp.bar"() : () -> i32
// CHECK: %[[v2:[^ ]*]] = "test_irdl_to_cpp.hash"(%[[v0]], %[[v0]]) : (i32, i32) -> i32
+ // CHECK: scf.if %[[test_arg]]
// CHECK: return
// CHECK: }
- func.func @test() {
+ func.func @test(%test_arg: i1) {
%0 = "test_irdl_to_cpp.bar"() : () -> i32
%1 = "test_irdl_to_cpp.beef"(%0, %0) : (i32, i32) -> i32
+ "test_irdl_to_cpp.conditional"(%test_arg) ({
+ ^cond(%test: i1):
+ %3 = "test_irdl_to_cpp.bar"() : () -> i32
+ "test.terminator"() : ()->()
+ }, {
+ ^then(%what: i1, %ever: i32):
+ %4 = "test_irdl_to_cpp.bar"() : () -> i32
+ "test.terminator"() : ()->()
+ }, {
+ ^else():
+ %5 = "test_irdl_to_cpp.bar"() : () -> i32
+ "test.terminator"() : ()->()
+ }) : (i1) -> ()
return
}
diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir b/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir
index 42e713e..85fb8cb 100644
--- a/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir
+++ b/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir
@@ -2,7 +2,7 @@
// CHECK: class TestIrdlToCpp
irdl.dialect @test_irdl_to_cpp {
-
+
// CHECK: class FooType
irdl.type @foo
@@ -32,4 +32,53 @@ irdl.dialect @test_irdl_to_cpp {
irdl.operands(lhs: %0, rhs: %0)
irdl.results(res: %0)
}
+
+ // CHECK: ConditionalOp declarations
+ // CHECK: ConditionalOpGenericAdaptorBase
+ // CHECK: ::mlir::Region &getCond() { return *getRegions()[0]; }
+ // CHECK: ::mlir::Region &getThen() { return *getRegions()[1]; }
+ // CHECK: ::mlir::Region &getElse() { return *getRegions()[2]; }
+ //
+ // CHECK: class ConditionalOp : public ::mlir::Op<ConditionalOp, ::mlir::OpTrait::NRegions<3>::Impl, ::mlir::OpTrait::OpInvariants>
+ // CHECK: ::mlir::Region &getCond() { return (*this)->getRegion(0); }
+ // CHECK: ::mlir::Region &getThen() { return (*this)->getRegion(1); }
+ // CHECK: ::mlir::Region &getElse() { return (*this)->getRegion(2); }
+
+ // CHECK: ConditionalOp definitions
+ // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_cond
+ // CHECK: if (!(region.getNumArguments() == 1)) {
+ // CHECK: failed to verify constraint: region with 1 entry block argument(s)
+
+ // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_then
+ // CHECK: if (!(true)) {
+
+ // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_else
+ // CHECK: if (!(region.getNumArguments() == 0)) {
+ // CHECK: failed to verify constraint: region with 0 entry block argument(s)
+
+ // CHECK: ConditionalOp::build
+ // CHECK: for (unsigned i = 0; i != 3; ++i)
+ // CHECK-NEXT: (void)odsState.addRegion();
+
+ // CHECK: ConditionalOp::verifyInvariantsImpl
+ // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_cond
+ // CHECK: failure
+ // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_then
+ // CHECK: failure
+ // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_else
+ // CHECK: failure
+ // CHECK: success
+ irdl.operation @conditional {
+ %r0 = irdl.region // Unconstrained region
+ %r1 = irdl.region() // Region with no entry block arguments
+
+ // TODO(#161018): support irdl.is in irdl-to-cpp
+ // %v0 = irdl.is i1 // Type constraint: i1 (boolean)
+ %v0 = irdl.any
+ %r2 = irdl.region(%v0) // Region with one i1 entry block argument
+ irdl.regions(cond: %r2, then: %r0, else: %r1)
+
+ %0 = irdl.any
+ irdl.operands(input: %0)
+ }
}
diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir b/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir
index 403b492..cc27456 100644
--- a/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir
+++ b/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir
@@ -7,7 +7,7 @@ irdl.dialect @test_irdl_to_cpp {
irdl.results(res: %1)
}
}
-// -----
+// -----
irdl.dialect @test_irdl_to_cpp {
irdl.operation @operands_no_any_of {
@@ -42,7 +42,7 @@ irdl.dialect @test_irdl_to_cpp {
irdl.dialect @test_irdl_to_cpp {
irdl.type @ty {
- %0 = irdl.any
+ %0 = irdl.any
// expected-error@+1 {{IRDL C++ translation does not yet support translation of irdl.parameters operation}}
irdl.parameters(ty: %0)
}
@@ -51,29 +51,8 @@ irdl.dialect @test_irdl_to_cpp {
// -----
irdl.dialect @test_irdl_to_cpp {
- irdl.operation @test_op {
- // expected-error@+1 {{IRDL C++ translation does not yet support translation of irdl.region operation}}
- %0 = irdl.region()
- irdl.regions(reg: %0)
- }
-
-}
-
-// -----
-
-irdl.dialect @test_irdl_to_cpp {
- irdl.operation @test_op {
- // expected-error@+1 {{IRDL C++ translation does not yet support translation of irdl.regions operation}}
- irdl.regions()
- }
-
-}
-
-// -----
-
-irdl.dialect @test_irdl_to_cpp {
irdl.type @test_derived {
// expected-error@+1 {{IRDL C++ translation does not yet support translation of irdl.base operation}}
%0 = irdl.base "!builtin.integer"
- }
+ }
}
diff --git a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
index 094ef0a..e51cac4 100644
--- a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
+++ b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp
@@ -173,8 +173,6 @@ struct TestXeGPUUnrollingPatterns
#undef DEBUG_TYPE
#define DEBUG_TYPE "test-xegpu-layout-interface"
-#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ")
-#define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n")
// Test pattern for distributing vector::StepOp from workgroup to subgroup.
// Validates DistributeLayoutAttr interfaces for offset computation
diff --git a/mlir/test/mlir-tblgen/op-format-invalid.td b/mlir/test/mlir-tblgen/op-format-invalid.td
index 2f29543..0a022ad 100644
--- a/mlir/test/mlir-tblgen/op-format-invalid.td
+++ b/mlir/test/mlir-tblgen/op-format-invalid.td
@@ -307,7 +307,7 @@ def DirectiveTypeZOperandInvalidI : TestFormat_Op<[{
def LiteralInvalidA : TestFormat_Op<[{
`a:`
}]>;
-// CHECK: error: expected valid literal but got '1': single character literal must be a letter or one of '_:,=<>()[]{}?+*'
+// CHECK: error: expected valid literal but got '1': single character literal must be a letter or one of '_:,=<>()[]{}?+-*'
def LiteralInvalidB : TestFormat_Op<[{
`1`
}]>;
diff --git a/mlir/test/mlir-tblgen/op-format-spec.td b/mlir/test/mlir-tblgen/op-format-spec.td
index 1541cd0..1ac2311 100644
--- a/mlir/test/mlir-tblgen/op-format-spec.td
+++ b/mlir/test/mlir-tblgen/op-format-spec.td
@@ -123,7 +123,7 @@ def DirectiveTypeValid : TestFormat_Op<[{
// CHECK-NOT: error
def LiteralValid : TestFormat_Op<[{
- `_` `:` `,` `=` `<` `>` `(` `)` `[` `]` `?` `+` `*` ` ` `` `->` `\n` `abc$._`
+ `_` `:` `,` `=` `<` `>` `(` `)` `[` `]` `?` `+` `-` `*` ` ` `` `->` `\n` `abc$._`
attr-dict
}]>;
diff --git a/mlir/test/python/dialects/transform_tune_ext.py b/mlir/test/python/dialects/transform_tune_ext.py
index dfb9359..eb2a083 100644
--- a/mlir/test/python/dialects/transform_tune_ext.py
+++ b/mlir/test/python/dialects/transform_tune_ext.py
@@ -1,21 +1,21 @@
# RUN: %PYTHON %s | FileCheck %s
-from mlir.ir import *
+from mlir import ir
from mlir.dialects import transform
from mlir.dialects.transform import tune, debug
def run(f):
- print("\nTEST:", f.__name__)
- with Context(), Location.unknown():
- module = Module.create()
- with InsertionPoint(module.body):
+ print("\n// TEST:", f.__name__)
+ with ir.Context(), ir.Location.unknown():
+ module = ir.Module.create()
+ with ir.InsertionPoint(module.body):
sequence = transform.SequenceOp(
transform.FailurePropagationMode.Propagate,
[],
transform.AnyOpType.get(),
)
- with InsertionPoint(sequence.body):
+ with ir.InsertionPoint(sequence.body):
f(sequence.bodyTarget)
transform.YieldOp()
print(module)
@@ -29,10 +29,10 @@ def testKnobOp(target):
# CHECK: %[[HEADS_OR_TAILS:.*]] = transform.tune.knob<"coin"> options = [true, false] -> !transform.any_param
heads_or_tails = tune.KnobOp(
- result=any_param, name=StringAttr.get("coin"), options=[True, False]
+ result=any_param, name=ir.StringAttr.get("coin"), options=[True, False]
)
# CHECK: transform.tune.knob<"animal"> options = ["cat", "dog", unit] -> !transform.any_param
- tune.KnobOp(any_param, name="animal", options=["cat", "dog", UnitAttr.get()])
+ tune.KnobOp(any_param, name="animal", options=["cat", "dog", ir.UnitAttr.get()])
# CHECK: transform.tune.knob<"tile_size"> options = [2, 4, 8, 16, 24, 32] -> !transform.any_param
tune.KnobOp(any_param, "tile_size", [2, 4, 8, 16, 24, 32])
# CHECK: transform.tune.knob<"magic_value"> options = [2.000000e+00, 2.250000e+00, 2.500000e+00, 2.750000e+00, 3.000000e+00] -> !transform.any_param
@@ -45,7 +45,10 @@ def testKnobOp(target):
heads = tune.KnobOp(any_param, "coin", options=[True, False], selected=True)
# CHECK: transform.tune.knob<"animal"> = "dog" from options = ["cat", "dog", unit] -> !transform.any_param
tune.KnobOp(
- any_param, name="animal", options=["cat", "dog", UnitAttr.get()], selected="dog"
+ any_param,
+ name="animal",
+ options=["cat", "dog", ir.UnitAttr.get()],
+ selected="dog",
)
# CHECK: transform.tune.knob<"tile_size"> = 8 : i64 from options = [2, 4, 8, 16, 24, 32] -> !transform.any_param
tune.KnobOp(any_param, "tile_size", [2, 4, 8, 16, 24, 32], selected=8)
@@ -57,16 +60,90 @@ def testKnobOp(target):
# CHECK: transform.tune.knob<"range_as_a_dict"> = 4 : i64 from options = {start = 2 : i64, step = 2 : i64, stop = 16 : i64} -> !transform.any_param
# NB: Membership of `selected` in non-ArrayAttr `options` is _not_ verified.
- i64 = IntegerType.get_signless(64)
+ i64 = ir.IntegerType.get_signless(64)
tune.knob(
any_param,
"range_as_a_dict",
- DictAttr.get(
+ ir.DictAttr.get(
{
- "start": IntegerAttr.get(i64, 2),
- "stop": IntegerAttr.get(i64, 16),
- "step": IntegerAttr.get(i64, 2),
+ "start": ir.IntegerAttr.get(i64, 2),
+ "stop": ir.IntegerAttr.get(i64, 16),
+ "step": ir.IntegerAttr.get(i64, 2),
}
),
selected=4,
)
+
+
+# CHECK-LABEL: TEST: testAlternativesOp
+@run
+def testAlternativesOp(target):
+ any_param = transform.AnyParamType.get()
+
+ # CHECK: %[[LEFT_OR_RIGHT_OUTCOME:.*]] = transform.tune.alternatives<"left_or_right"> -> !transform.any_param {
+ left_or_right = tune.AlternativesOp(
+ [transform.AnyParamType.get()], "left_or_right", 2
+ )
+ idx_for_left, idx_for_right = 0, 1
+ with ir.InsertionPoint(left_or_right.alternatives[idx_for_left].blocks[0]):
+ # CHECK: %[[C0:.*]] = transform.param.constant 0
+ i32_0 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 0)
+ c0 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_0)
+ # CHECK: transform.yield %[[C0]]
+ transform.yield_(c0)
+ # CHECK-NEXT: }, {
+ with ir.InsertionPoint(left_or_right.alternatives[idx_for_right].blocks[0]):
+ # CHECK: %[[C1:.*]] = transform.param.constant 1
+ i32_1 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1)
+ c1 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1)
+ # CHECK: transform.yield %[[C1]]
+ transform.yield_(c1)
+ # CHECK-NEXT: }
+ outcome_of_left_or_right_decision = left_or_right.results[0]
+
+ # CHECK: transform.tune.alternatives<"fork_in_the_road"> selected_region = 0 -> !transform.any_param {
+ fork_in_the_road = tune.AlternativesOp(
+ [transform.AnyParamType.get()], "fork_in_the_road", 2, selected_region=0
+ )
+ with ir.InsertionPoint(fork_in_the_road.alternatives[idx_for_left].blocks[0]):
+ # CHECK: %[[C0:.*]] = transform.param.constant 0
+ i32_0 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 0)
+ c0 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_0)
+ # CHECK: transform.yield %[[C0]]
+ transform.yield_(c0)
+ # CHECK-NEXT: }, {
+ with ir.InsertionPoint(fork_in_the_road.alternatives[idx_for_right].blocks[0]):
+ # CHECK: %[[C1:.*]] = transform.param.constant 1
+ i32_1 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1)
+ c1 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1)
+ # CHECK: transform.yield %[[C1]]
+ transform.yield_(c1)
+ # CHECK-NEXT: }
+
+ # CHECK: transform.tune.alternatives<"left_or_right_as_before"> selected_region = %[[LEFT_OR_RIGHT_OUTCOME]] : !transform.any_param {
+ left_or_right_as_before = tune.AlternativesOp(
+ [],
+ "left_or_right_as_before",
+ 2,
+ selected_region=outcome_of_left_or_right_decision,
+ )
+ with ir.InsertionPoint(
+ left_or_right_as_before.alternatives[idx_for_left].blocks[0]
+ ):
+ # CHECK: transform.param.constant 1337
+ i32_1337 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1337)
+ c1337 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1337)
+ # CHECK: transform.debug.emit_param_as_remark
+ debug.emit_param_as_remark(c1337)
+ transform.yield_([])
+ # CHECK-NEXT: }, {
+ with ir.InsertionPoint(
+ left_or_right_as_before.alternatives[idx_for_right].blocks[0]
+ ):
+ # CHECK: transform.param.constant 42
+ i32_42 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 42)
+ c42 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_42)
+ # CHECK: transform.debug.emit_param_as_remark
+ debug.emit_param_as_remark(c42)
+ transform.yield_([])
+ # CHECK-NEXT: }
diff --git a/mlir/test/python/ir/operation.py b/mlir/test/python/ir/operation.py
index 4a3625c..cb4cfc8c 100644
--- a/mlir/test/python/ir/operation.py
+++ b/mlir/test/python/ir/operation.py
@@ -696,6 +696,7 @@ def testOperationPrint():
# CHECK: resource1: "0x08
module.operation.print(large_elements_limit=2)
+
# CHECK-LABEL: TEST: testKnownOpView
@run
def testKnownOpView():
@@ -969,6 +970,13 @@ def testOperationLoc():
assert op.location == loc
assert op.operation.location == loc
+ another_loc = Location.name("another_loc")
+ op.location = another_loc
+ assert op.location == another_loc
+ assert op.operation.location == another_loc
+ # CHECK: loc("another_loc")
+ print(op.location)
+
# CHECK-LABEL: TEST: testModuleMerge
@run
diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp
index a1899a8..8dd9713 100644
--- a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp
@@ -403,6 +403,7 @@ void DefFormat::genLiteralParser(StringRef value, FmtContext &ctx,
.Case("]", "RSquare")
.Case("?", "Question")
.Case("+", "Plus")
+ .Case("-", "Minus")
.Case("*", "Star")
.Case("...", "Ellipsis")
<< "()";
diff --git a/mlir/tools/mlir-tblgen/FormatGen.cpp b/mlir/tools/mlir-tblgen/FormatGen.cpp
index 4dfdde2..04d3ed1 100644
--- a/mlir/tools/mlir-tblgen/FormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/FormatGen.cpp
@@ -518,7 +518,7 @@ bool mlir::tblgen::isValidLiteral(StringRef value,
// If there is only one character, this must either be punctuation or a
// single character bare identifier.
if (value.size() == 1) {
- StringRef bare = "_:,=<>()[]{}?+*";
+ StringRef bare = "_:,=<>()[]{}?+-*";
if (isalpha(front) || bare.contains(front))
return true;
if (emitError)
diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
index 0d113b3..ccf21d1 100644
--- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
@@ -852,6 +852,7 @@ static void genLiteralParser(StringRef value, MethodBody &body) {
.Case("]", "RSquare()")
.Case("?", "Question()")
.Case("+", "Plus()")
+ .Case("-", "Minus()")
.Case("*", "Star()")
.Case("...", "Ellipsis()");
}
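The format-gen changes above add `-` to the set of accepted single-character punctuation literals and teach both generated parsers (attr/type and op) to emit a Minus token check for it. As a minimal standalone sketch of the dispatch style involved, assuming only llvm::StringSwitch and a hypothetical literalParserName helper:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"
    #include <string>

    // Sketch: map a punctuation literal from an assembly format to the name
    // of the token-parser callback the generated code will invoke ("Minus"
    // is the case this patch adds).
    static std::string literalParserName(llvm::StringRef Literal) {
      return llvm::StringSwitch<std::string>(Literal)
          .Case("+", "Plus")
          .Case("-", "Minus") // newly accepted literal
          .Case("*", "Star")
          .Default("Keyword"); // bare identifiers use keyword parsing
    }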
diff --git a/offload/libomptarget/OpenMP/InteropAPI.cpp b/offload/libomptarget/OpenMP/InteropAPI.cpp
index eb5425e..c55ef2c 100644
--- a/offload/libomptarget/OpenMP/InteropAPI.cpp
+++ b/offload/libomptarget/OpenMP/InteropAPI.cpp
@@ -124,7 +124,7 @@ void *getProperty<void *>(omp_interop_val_t &InteropVal,
case omp_ipr_device_context:
return InteropVal.device_info.Context;
case omp_ipr_targetsync:
- return InteropVal.async_info->Queue;
+ return InteropVal.async_info ? InteropVal.async_info->Queue : nullptr;
default:;
}
getTypeMismatch(Property, Err);
@@ -167,7 +167,6 @@ bool getPropertyCheck(omp_interop_val_t **InteropPtr,
omp_interop_property_t property_id, \
int *err) { \
omp_interop_val_t *interop_val = (omp_interop_val_t *)interop; \
- assert((interop_val)->interop_type == kmp_interop_type_targetsync); \
if (!getPropertyCheck(&interop_val, property_id, err)) { \
return (RETURN_TYPE)(0); \
} \
@@ -275,8 +274,8 @@ omp_interop_val_t *__tgt_interop_get(ident_t *LocRef, int32_t InteropType,
return Interop;
}
-int __tgt_interop_use(ident_t *LocRef, omp_interop_val_t *Interop,
- interop_ctx_t *Ctx, dep_pack_t *Deps) {
+int __tgt_interop_use60(ident_t *LocRef, omp_interop_val_t *Interop,
+ interop_ctx_t *Ctx, dep_pack_t *Deps) {
bool Nowait = Ctx->flags.nowait;
DP("Call to %s with interop " DPxMOD ", nowait %" PRId32 "\n", __func__,
DPxPTR(Interop), Nowait);
@@ -359,6 +358,40 @@ EXTERN int ompx_interop_add_completion_callback(omp_interop_val_t *Interop,
return omp_irc_success;
}
+// Backwards-compatibility wrappers around the new interop entry points.
+void __tgt_interop_init(ident_t *LocRef, int32_t Gtid,
+ omp_interop_val_t *&InteropPtr, int32_t InteropType,
+ int32_t DeviceId, int32_t Ndeps,
+ kmp_depend_info_t *DepList, int32_t HaveNowait) {
+ constexpr int32_t old_kmp_interop_type_targetsync = 2;
+ interop_ctx_t Ctx = {0, {false, (bool)HaveNowait, 0}, Gtid};
+ dep_pack_t Deps = {Ndeps, 0, DepList, nullptr};
+ InteropPtr =
+ __tgt_interop_get(LocRef,
+ InteropType == old_kmp_interop_type_targetsync
+ ? kmp_interop_type_targetsync
+ : kmp_interop_type_target,
+ DeviceId, 0, nullptr, &Ctx, Ndeps ? &Deps : nullptr);
+}
+
+void __tgt_interop_use(ident_t *LocRef, int32_t Gtid,
+ omp_interop_val_t *&InteropPtr, int32_t DeviceId,
+ int32_t Ndeps, kmp_depend_info_t *DepList,
+ int32_t HaveNowait) {
+ interop_ctx_t Ctx = {0, {false, (bool)HaveNowait, 0}, Gtid};
+ dep_pack_t Deps = {Ndeps, 0, DepList, nullptr};
+ __tgt_interop_use60(LocRef, InteropPtr, &Ctx, Ndeps ? &Deps : nullptr);
+}
+
+void __tgt_interop_destroy(ident_t *LocRef, int32_t Gtid,
+ omp_interop_val_t *&InteropPtr, int32_t DeviceId,
+ int32_t Ndeps, kmp_depend_info_t *DepList,
+ int32_t HaveNowait) {
+ interop_ctx_t Ctx = {0, {false, (bool)HaveNowait, 0}, Gtid};
+ dep_pack_t Deps = {Ndeps, 0, DepList, nullptr};
+ __tgt_interop_release(LocRef, InteropPtr, &Ctx, Ndeps ? &Deps : nullptr);
+}
+
} // extern "C"
llvm::Expected<DeviceTy &> omp_interop_val_t::getDevice() const {
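For reference, the null check added to getProperty<void *> above is observable through the standard OpenMP interop query API. A hedged sketch (omp_get_interop_ptr is standard OpenMP 5.1; the no-crash behavior is what this hunk changes):

    #include <omp.h>
    #include <stdio.h>

    // Querying omp_ipr_targetsync on an interop object that carries no async
    // queue now yields NULL plus an error code instead of dereferencing a
    // null async_info.
    void query_queue(omp_interop_t iop) {
      int err;
      void *queue = omp_get_interop_ptr(iop, omp_ipr_targetsync, &err);
      if (!queue)
        printf("no targetsync queue available (err=%d)\n", err);
    }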
diff --git a/offload/libomptarget/exports b/offload/libomptarget/exports
index 8e2db6b..1374bfe 100644
--- a/offload/libomptarget/exports
+++ b/offload/libomptarget/exports
@@ -68,8 +68,11 @@ VERS1.0 {
omp_get_interop_int;
omp_get_interop_name;
omp_get_interop_type_desc;
- __tgt_interop_get;
+ __tgt_interop_init;
__tgt_interop_use;
+ __tgt_interop_destroy;
+ __tgt_interop_get;
+ __tgt_interop_use60;
__tgt_interop_release;
__tgt_target_sync;
__llvmPushCallConfiguration;
diff --git a/offload/plugins-nextgen/amdgpu/src/rtl.cpp b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
index 7b834ee..f73fa047 100644
--- a/offload/plugins-nextgen/amdgpu/src/rtl.cpp
+++ b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
@@ -2712,6 +2712,37 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
return Plugin::success();
}
+ interop_spec_t selectInteropPreference(int32_t InteropType,
+ int32_t NumPrefers,
+ interop_spec_t *Prefers) override {
+ // TODO: update once targetsync is supported
+ if (InteropType == kmp_interop_type_target)
+ return interop_spec_t{tgt_fr_hsa, {false, 0}, 0};
+ return interop_spec_t{tgt_fr_none, {false, 0}, 0};
+ }
+
+ Expected<omp_interop_val_t *>
+ createInterop(int32_t InteropType, interop_spec_t &InteropSpec) override {
+ auto *Ret = new omp_interop_val_t(
+ DeviceId, static_cast<kmp_interop_type_t>(InteropType));
+ Ret->fr_id = tgt_fr_hsa;
+ Ret->vendor_id = omp_vendor_amd;
+
+ // TODO: implement targetsync support
+
+ Ret->device_info.Platform = nullptr;
+ Ret->device_info.Device = reinterpret_cast<void *>(Agent.handle);
+ Ret->device_info.Context = nullptr;
+
+ return Ret;
+ }
+
+ Error releaseInterop(omp_interop_val_t *Interop) override {
+ if (Interop)
+ delete Interop;
+ return Plugin::success();
+ }
+
Error enqueueHostCallImpl(void (*Callback)(void *), void *UserData,
AsyncInfoWrapperTy &AsyncInfo) override {
AMDGPUStreamTy *Stream = nullptr;
diff --git a/offload/plugins-nextgen/cuda/src/rtl.cpp b/offload/plugins-nextgen/cuda/src/rtl.cpp
index b30c651..e5c4a1b 100644
--- a/offload/plugins-nextgen/cuda/src/rtl.cpp
+++ b/offload/plugins-nextgen/cuda/src/rtl.cpp
@@ -917,6 +917,50 @@ struct CUDADeviceTy : public GenericDeviceTy {
return Plugin::success();
}
+ interop_spec_t selectInteropPreference(int32_t InteropType,
+ int32_t NumPrefers,
+ interop_spec_t *Prefers) override {
+ return interop_spec_t{tgt_fr_cuda, {true, 0}, 0};
+ }
+
+ Expected<omp_interop_val_t *>
+ createInterop(int32_t InteropType, interop_spec_t &InteropSpec) override {
+ auto *Ret = new omp_interop_val_t(
+ DeviceId, static_cast<kmp_interop_type_t>(InteropType));
+ Ret->fr_id = tgt_fr_cuda;
+ Ret->vendor_id = omp_vendor_nvidia;
+
+ if (InteropType == kmp_interop_type_target ||
+ InteropType == kmp_interop_type_targetsync) {
+ Ret->device_info.Platform = nullptr;
+ Ret->device_info.Device = reinterpret_cast<void *>(Device);
+ Ret->device_info.Context = Context;
+ }
+
+ if (InteropType == kmp_interop_type_targetsync) {
+ Ret->async_info = new __tgt_async_info();
+ if (auto Err = setContext())
+ return Err;
+ CUstream Stream;
+ if (auto Err = CUDAStreamManager.getResource(Stream))
+ return Err;
+
+ Ret->async_info->Queue = Stream;
+ }
+ return Ret;
+ }
+
+ Error releaseInterop(omp_interop_val_t *Interop) override {
+ if (!Interop)
+ return Plugin::success();
+
+ if (Interop->async_info)
+ delete Interop->async_info;
+
+ delete Interop;
+ return Plugin::success();
+ }
+
Error enqueueHostCallImpl(void (*Callback)(void *), void *UserData,
AsyncInfoWrapperTy &AsyncInfo) override {
if (auto Err = setContext())
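With createInterop implemented for CUDA, a targetsync interop object is backed by a real CUstream drawn from the plugin's stream manager. A hedged application-side sketch (standard OpenMP interop pragmas plus the CUDA driver type; not part of this patch):

    #include <cuda.h>
    #include <omp.h>

    void run_native_work() {
      omp_interop_t iop = omp_interop_none;
    #pragma omp interop init(targetsync : iop)
      int err;
      CUstream s =
          (CUstream)omp_get_interop_ptr(iop, omp_ipr_targetsync, &err);
      if (s) {
        // ... enqueue CUDA driver-API work on `s` here ...
      }
    #pragma omp interop destroy(iop)
    }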
diff --git a/offload/test/offloading/fortran/target-declare-mapper-parent-allocatable.f90 b/offload/test/offloading/fortran/target-declare-mapper-parent-allocatable.f90
new file mode 100644
index 0000000..65e04af
--- /dev/null
+++ b/offload/test/offloading/fortran/target-declare-mapper-parent-allocatable.f90
@@ -0,0 +1,43 @@
+! This test validates that a declare mapper for a derived type that extends
+! a parent type with an allocatable component correctly maps the nested
+! allocatable payload via the mapper when the whole object is mapped on the
+! target.
+
+! REQUIRES: flang, amdgpu
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+
+program target_declare_mapper_parent_allocatable
+ implicit none
+
+ type, abstract :: base_t
+ real, allocatable :: base_arr(:)
+ end type base_t
+
+ type, extends(base_t) :: real_t
+ real, allocatable :: real_arr(:)
+ end type real_t
+ !$omp declare mapper(custommapper: real_t :: t) map(t%base_arr, t%real_arr)
+
+ type(real_t) :: r
+ integer :: i
+ allocate(r%base_arr(10), source=1.0)
+ allocate(r%real_arr(10), source=1.0)
+
+ !$omp target map(mapper(custommapper), tofrom: r)
+ do i = 1, size(r%base_arr)
+ r%base_arr(i) = 2.0
+ r%real_arr(i) = 3.0
+ r%real_arr(i) = r%base_arr(1)
+ end do
+ !$omp end target
+
+
+ !CHECK: base_arr: 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.
+ print*, "base_arr: ", r%base_arr
+ !CHECK: real_arr: 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.
+ print*, "real_arr: ", r%real_arr
+
+ deallocate(r%real_arr)
+ deallocate(r%base_arr)
+end program target_declare_mapper_parent_allocatable
diff --git a/orc-rt/include/orc-rt/CallableTraitsHelper.h b/orc-rt/include/orc-rt/CallableTraitsHelper.h
new file mode 100644
index 0000000..12d7d56
--- /dev/null
+++ b/orc-rt/include/orc-rt/CallableTraitsHelper.h
@@ -0,0 +1,74 @@
+//===- CallableTraitsHelper.h - Callable arg/ret type extractor -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// CallableTraitsHelper API.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_CALLABLETRAITSHELPER_H
+#define ORC_RT_CALLABLETRAITSHELPER_H
+
+#include <tuple>
+#include <type_traits>
+
+namespace orc_rt {
+
+/// CallableTraitsHelper takes an implementation class template ImplT and a
+/// callable type C and passes the return and argument types of C to the
+/// ImplT class template.
+///
+/// This can be used to simplify the implementation of classes that need to
+/// operate on callable types.
+template <template <typename...> typename ImplT, typename C>
+struct CallableTraitsHelper
+ : public CallableTraitsHelper<
+ ImplT,
+ decltype(&std::remove_cv_t<std::remove_reference_t<C>>::operator())> {
+};
+
+template <template <typename...> typename ImplT, typename RetT,
+ typename... ArgTs>
+struct CallableTraitsHelper<ImplT, RetT(ArgTs...)>
+ : public ImplT<RetT, ArgTs...> {};
+
+template <template <typename...> typename ImplT, typename RetT,
+ typename... ArgTs>
+struct CallableTraitsHelper<ImplT, RetT (*)(ArgTs...)>
+ : public CallableTraitsHelper<ImplT, RetT(ArgTs...)> {};
+
+template <template <typename...> typename ImplT, typename RetT,
+ typename... ArgTs>
+struct CallableTraitsHelper<ImplT, RetT (&)(ArgTs...)>
+ : public CallableTraitsHelper<ImplT, RetT(ArgTs...)> {};
+
+template <template <typename...> typename ImplT, typename ClassT, typename RetT,
+ typename... ArgTs>
+struct CallableTraitsHelper<ImplT, RetT (ClassT::*)(ArgTs...)>
+ : public CallableTraitsHelper<ImplT, RetT(ArgTs...)> {};
+
+template <template <typename...> typename ImplT, typename ClassT, typename RetT,
+ typename... ArgTs>
+struct CallableTraitsHelper<ImplT, RetT (ClassT::*)(ArgTs...) const>
+ : public CallableTraitsHelper<ImplT, RetT(ArgTs...)> {};
+
+namespace detail {
+template <typename RetT, typename... ArgTs> struct CallableArgInfoImpl {
+ typedef RetT return_type;
+ typedef std::tuple<ArgTs...> args_tuple_type;
+};
+} // namespace detail
+
+/// CallableArgInfo provides typedefs for the return type and argument types
+/// (as a tuple) of the given callable type.
+template <typename Callable>
+struct CallableArgInfo
+ : public CallableTraitsHelper<detail::CallableArgInfoImpl, Callable> {};
+
+} // namespace orc_rt
+
+#endif // ORC_RT_CALLABLETRAITSHELPER_H
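A minimal usage sketch for the new header, mirroring the unit tests added later in this patch: CallableArgInfo exposes the return type and argument tuple of a free function, lambda, or member-function pointer.

    #include "orc-rt/CallableTraitsHelper.h"
    #include <tuple>
    #include <type_traits>

    int add(int X, float Y); // any callable shape works: function, lambda, functor

    using Info = orc_rt::CallableArgInfo<decltype(add)>;
    static_assert(std::is_same_v<Info::return_type, int>);
    static_assert(
        std::is_same_v<Info::args_tuple_type, std::tuple<int, float>>);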
diff --git a/orc-rt/include/orc-rt/Error.h b/orc-rt/include/orc-rt/Error.h
index fe0754b..48d9064 100644
--- a/orc-rt/include/orc-rt/Error.h
+++ b/orc-rt/include/orc-rt/Error.h
@@ -114,7 +114,7 @@ private:
void setChecked(bool Checked) { ErrPtr = (ErrPtr & ~uintptr_t(1)) | Checked; }
template <typename ErrT = ErrorInfoBase> std::unique_ptr<ErrT> takePayload() {
- static_assert(std::is_base_of<ErrorInfoBase, ErrT>::value,
+ static_assert(std::is_base_of_v<ErrorInfoBase, ErrT>,
"ErrT is not an ErrorInfoBase subclass");
std::unique_ptr<ErrT> Tmp(getPtr<ErrT>());
setPtr(nullptr);
@@ -288,11 +288,15 @@ private:
Error *Err;
};
+/// Tag to force construction of an Expected value in the success state. See
+/// Expected constructor for details.
+struct ForceExpectedSuccessValue {};
+
template <typename T> class ORC_RT_NODISCARD Expected {
template <class OtherT> friend class Expected;
- static constexpr bool IsRef = std::is_reference<T>::value;
+ static constexpr bool IsRef = std::is_reference_v<T>;
using wrap = std::reference_wrapper<std::remove_reference_t<T>>;
using error_type = std::unique_ptr<ErrorInfoBase>;
using storage_type = std::conditional_t<IsRef, wrap, T>;
@@ -310,10 +314,17 @@ public:
new (getErrorStorage()) error_type(Err.takePayload());
}
+ template <typename OtherT>
+ Expected(OtherT &&Val, ForceExpectedSuccessValue _,
+ std::enable_if_t<std::is_convertible_v<OtherT, T>> * = nullptr)
+ : HasError(false), Unchecked(true) {
+ new (getStorage()) storage_type(std::forward<OtherT>(Val));
+ }
+
/// Create an Expected from a T value.
template <typename OtherT>
Expected(OtherT &&Val,
- std::enable_if_t<std::is_convertible<OtherT, T>::value> * = nullptr)
+ std::enable_if_t<std::is_convertible_v<OtherT, T>> * = nullptr)
: HasError(false), Unchecked(true) {
new (getStorage()) storage_type(std::forward<OtherT>(Val));
}
@@ -324,9 +335,8 @@ public:
/// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT
/// must be convertible to T.
template <class OtherT>
- Expected(
- Expected<OtherT> &&Other,
- std::enable_if_t<std::is_convertible<OtherT, T>::value> * = nullptr) {
+ Expected(Expected<OtherT> &&Other,
+ std::enable_if_t<std::is_convertible_v<OtherT, T>> * = nullptr) {
moveConstruct(std::move(Other));
}
@@ -335,7 +345,7 @@ public:
template <class OtherT>
explicit Expected(
Expected<OtherT> &&Other,
- std::enable_if_t<!std::is_convertible<OtherT, T>::value> * = nullptr) {
+ std::enable_if_t<!std::is_convertible_v<OtherT, T>> * = nullptr) {
moveConstruct(std::move(Other));
}
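The tag exists because of nesting: for Expected<Error> (or Expected<Expected<T>>), a plain Error argument is consumed as the failure payload, so without the tag there is no way to build a success value whose payload happens to be an Error. A sketch mirroring the tests added below:

    // Success state whose *value* is an Error (here one holding success):
    Expected<Error> E(Error::success(), ForceExpectedSuccessValue());
    // Without the tag, the Error argument would select the failure-state
    // constructor instead.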
diff --git a/orc-rt/include/orc-rt/SPSWrapperFunction.h b/orc-rt/include/orc-rt/SPSWrapperFunction.h
index d08176f..14a3d8e 100644
--- a/orc-rt/include/orc-rt/SPSWrapperFunction.h
+++ b/orc-rt/include/orc-rt/SPSWrapperFunction.h
@@ -20,9 +20,11 @@
namespace orc_rt {
namespace detail {
-template <typename... SPSArgTs> struct WFSPSSerializer {
- template <typename... ArgTs>
- std::optional<WrapperFunctionBuffer> operator()(const ArgTs &...Args) {
+template <typename... SPSArgTs> struct WFSPSHelper {
+private:
+ template <typename... SerializableArgTs>
+ std::optional<WrapperFunctionBuffer>
+ serializeImpl(const SerializableArgTs &...Args) {
auto R =
WrapperFunctionBuffer::allocate(SPSArgList<SPSArgTs...>::size(Args...));
SPSOutputBuffer OB(R.data(), R.size());
@@ -30,15 +32,62 @@ template <typename... SPSArgTs> struct WFSPSSerializer {
return std::nullopt;
return std::move(R);
}
-};
-template <typename... SPSArgTs> struct WFSPSDeserializer {
+ template <typename T> static const T &toSerializable(const T &Arg) noexcept {
+ return Arg;
+ }
+
+ static SPSSerializableError toSerializable(Error Err) noexcept {
+ return SPSSerializableError(std::move(Err));
+ }
+
+ template <typename T>
+ static SPSSerializableExpected<T> toSerializable(Expected<T> Arg) noexcept {
+ return SPSSerializableExpected<T>(std::move(Arg));
+ }
+
+ template <typename... Ts> struct DeserializableTuple;
+
+ template <typename... Ts> struct DeserializableTuple<std::tuple<Ts...>> {
+ typedef std::tuple<
+ std::decay_t<decltype(toSerializable(std::declval<Ts>()))>...>
+ type;
+ };
+
+ template <typename... Ts>
+ using DeserializableTuple_t = typename DeserializableTuple<Ts...>::type;
+
+ template <typename T> static T fromSerializable(T &&Arg) noexcept {
+ return Arg;
+ }
+
+ static Error fromSerializable(SPSSerializableError Err) noexcept {
+ return Err.toError();
+ }
+
+ template <typename T>
+ static Expected<T> fromSerializable(SPSSerializableExpected<T> Val) noexcept {
+ return Val.toExpected();
+ }
+
+public:
template <typename... ArgTs>
- bool operator()(WrapperFunctionBuffer &ArgBytes, ArgTs &...Args) {
+ std::optional<WrapperFunctionBuffer> serialize(ArgTs &&...Args) {
+ return serializeImpl(toSerializable(std::forward<ArgTs>(Args))...);
+ }
+
+ template <typename ArgTuple>
+ std::optional<ArgTuple> deserialize(WrapperFunctionBuffer ArgBytes) {
assert(!ArgBytes.getOutOfBandError() &&
"Should not attempt to deserialize out-of-band error");
SPSInputBuffer IB(ArgBytes.data(), ArgBytes.size());
- return SPSArgList<SPSArgTs...>::deserialize(IB, Args...);
+ DeserializableTuple_t<ArgTuple> Args;
+ if (!SPSSerializationTraits<SPSTuple<SPSArgTs...>,
+ decltype(Args)>::deserialize(IB, Args))
+ return std::nullopt;
+ return std::apply(
+ [](auto &&...A) { return ArgTuple(fromSerializable(A)...); },
+ std::move(Args));
}
};
@@ -48,19 +97,8 @@ template <typename SPSSig> struct WrapperFunctionSPSSerializer;
template <typename SPSRetT, typename... SPSArgTs>
struct WrapperFunctionSPSSerializer<SPSRetT(SPSArgTs...)> {
- static detail::WFSPSSerializer<SPSArgTs...> argumentSerializer() noexcept {
- return {};
- }
- static detail::WFSPSDeserializer<SPSArgTs...>
- argumentDeserializer() noexcept {
- return {};
- }
- static detail::WFSPSSerializer<SPSRetT> resultSerializer() noexcept {
- return {};
- }
- static detail::WFSPSDeserializer<SPSRetT> resultDeserializer() noexcept {
- return {};
- }
+ static detail::WFSPSHelper<SPSArgTs...> arguments() noexcept { return {}; }
+ static detail::WFSPSHelper<SPSRetT> result() noexcept { return {}; }
};
/// Provides call and handle utilities to simplify writing and invocation of
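The consolidated WFSPSHelper also routes Error and Expected<T> values through their SPS-serializable wrappers automatically, which is what lets handlers and callers traffic in those types directly. The round trip in outline (names as in the diff; a schematic, not additional API):

    // serialize:   Args... --toSerializable--> SPS-friendly values --> bytes
    //              (Error becomes SPSSerializableError,
    //               Expected<T> becomes SPSSerializableExpected<T>)
    // deserialize: bytes --> DeserializableTuple_t<ArgTuple>
    //                    --fromSerializable--> ArgTuple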
diff --git a/orc-rt/include/orc-rt/WrapperFunction.h b/orc-rt/include/orc-rt/WrapperFunction.h
index bedc097..ca165db 100644
--- a/orc-rt/include/orc-rt/WrapperFunction.h
+++ b/orc-rt/include/orc-rt/WrapperFunction.h
@@ -14,6 +14,7 @@
#define ORC_RT_WRAPPERFUNCTION_H
#include "orc-rt-c/WrapperFunction.h"
+#include "orc-rt/CallableTraitsHelper.h"
#include "orc-rt/Error.h"
#include "orc-rt/bind.h"
@@ -105,37 +106,16 @@ private:
namespace detail {
-template <typename C>
-struct WFCallableTraits
- : public WFCallableTraits<
- decltype(&std::remove_cv_t<std::remove_reference_t<C>>::operator())> {
-};
-
-template <typename RetT> struct WFCallableTraits<RetT()> {
- typedef void HeadArgType;
+template <typename RetT, typename ReturnT, typename... ArgTs>
+struct WFHandlerTraitsImpl {
+ static_assert(std::is_void_v<RetT>,
+ "Async wrapper function handler must return void");
+ typedef ReturnT YieldType;
+ typedef std::tuple<ArgTs...> ArgTupleType;
};
-template <typename RetT, typename ArgT, typename... ArgTs>
-struct WFCallableTraits<RetT(ArgT, ArgTs...)> {
- typedef ArgT HeadArgType;
- typedef std::tuple<ArgTs...> TailArgTuple;
-};
-
-template <typename RetT, typename... ArgTs>
-struct WFCallableTraits<RetT (*)(ArgTs...)>
- : public WFCallableTraits<RetT(ArgTs...)> {};
-
-template <typename RetT, typename... ArgTs>
-struct WFCallableTraits<RetT (&)(ArgTs...)>
- : public WFCallableTraits<RetT(ArgTs...)> {};
-
-template <typename ClassT, typename RetT, typename... ArgTs>
-struct WFCallableTraits<RetT (ClassT::*)(ArgTs...)>
- : public WFCallableTraits<RetT(ArgTs...)> {};
-
-template <typename ClassT, typename RetT, typename... ArgTs>
-struct WFCallableTraits<RetT (ClassT::*)(ArgTs...) const>
- : public WFCallableTraits<RetT(ArgTs...)> {};
+template <typename C>
+using WFHandlerTraits = CallableTraitsHelper<WFHandlerTraitsImpl, C>;
template <typename Serializer> class StructuredYieldBase {
public:
@@ -151,12 +131,15 @@ protected:
std::decay_t<Serializer> S;
};
+template <typename RetT, typename Serializer> class StructuredYield;
+
template <typename RetT, typename Serializer>
-class StructuredYield : public StructuredYieldBase<Serializer> {
+class StructuredYield<std::tuple<RetT>, Serializer>
+ : public StructuredYieldBase<Serializer> {
public:
using StructuredYieldBase<Serializer>::StructuredYieldBase;
void operator()(RetT &&R) {
- if (auto ResultBytes = this->S.resultSerializer()(std::forward<RetT>(R)))
+ if (auto ResultBytes = this->S.result().serialize(std::forward<RetT>(R)))
this->Return(this->Session, this->CallCtx, ResultBytes->release());
else
this->Return(this->Session, this->CallCtx,
@@ -167,7 +150,7 @@ public:
};
template <typename Serializer>
-class StructuredYield<void, Serializer>
+class StructuredYield<std::tuple<>, Serializer>
: public StructuredYieldBase<Serializer> {
public:
using StructuredYieldBase<Serializer>::StructuredYieldBase;
@@ -180,18 +163,20 @@ public:
template <typename T, typename Serializer> struct ResultDeserializer;
template <typename T, typename Serializer>
-struct ResultDeserializer<Expected<T>, Serializer> {
+struct ResultDeserializer<std::tuple<Expected<T>>, Serializer> {
static Expected<T> deserialize(WrapperFunctionBuffer ResultBytes,
Serializer &S) {
- T Val;
- if (S.resultDeserializer()(ResultBytes, Val))
- return std::move(Val);
+ if (auto Val = S.result().template deserialize<std::tuple<T>>(
+ std::move(ResultBytes)))
+ return Expected<T>(std::move(std::get<0>(*Val)),
+ ForceExpectedSuccessValue());
else
return make_error<StringError>("Could not deserialize result");
}
};
-template <typename Serializer> struct ResultDeserializer<Error, Serializer> {
+template <typename Serializer>
+struct ResultDeserializer<std::tuple<Error>, Serializer> {
static Error deserialize(WrapperFunctionBuffer ResultBytes, Serializer &S) {
assert(ResultBytes.empty());
return Error::success();
@@ -213,13 +198,15 @@ struct WrapperFunction {
typename... ArgTs>
static void call(Caller &&C, Serializer &&S, ResultHandler &&RH,
ArgTs &&...Args) {
- typedef detail::WFCallableTraits<ResultHandler> ResultHandlerTraits;
+ typedef CallableArgInfo<ResultHandler> ResultHandlerTraits;
+ static_assert(std::is_void_v<typename ResultHandlerTraits::return_type>,
+ "Result handler should return void");
static_assert(
- std::tuple_size_v<typename ResultHandlerTraits::TailArgTuple> == 0,
- "Expected one argument to result-handler");
- typedef typename ResultHandlerTraits::HeadArgType ResultType;
+ std::tuple_size_v<typename ResultHandlerTraits::args_tuple_type> == 1,
+ "Result-handler should have exactly one argument");
+ typedef typename ResultHandlerTraits::args_tuple_type ResultTupleType;
- if (auto ArgBytes = S.argumentSerializer()(std::forward<ArgTs>(Args)...)) {
+ if (auto ArgBytes = S.arguments().serialize(std::forward<ArgTs>(Args)...)) {
C(
[RH = std::move(RH),
S = std::move(S)](orc_rt_SessionRef Session,
@@ -227,9 +214,8 @@ struct WrapperFunction {
if (const char *ErrMsg = ResultBytes.getOutOfBandError())
RH(make_error<StringError>(ErrMsg));
else
- RH(detail::ResultDeserializer<
- ResultType, Serializer>::deserialize(std::move(ResultBytes),
- S));
+ RH(detail::ResultDeserializer<ResultTupleType, Serializer>::
+ deserialize(std::move(ResultBytes), S));
},
std::move(*ArgBytes));
} else
@@ -246,21 +232,22 @@ struct WrapperFunction {
orc_rt_WrapperFunctionReturn Return,
WrapperFunctionBuffer ArgBytes, Serializer &&S,
Handler &&H) {
- typedef detail::WFCallableTraits<Handler> HandlerTraits;
- typedef typename HandlerTraits::HeadArgType Yield;
- typedef typename HandlerTraits::TailArgTuple ArgTuple;
- typedef typename detail::WFCallableTraits<Yield>::HeadArgType RetType;
+ typedef detail::WFHandlerTraits<Handler> HandlerTraits;
+ typedef typename HandlerTraits::ArgTupleType ArgTuple;
+ typedef typename HandlerTraits::YieldType Yield;
+ static_assert(std::is_void_v<typename CallableArgInfo<Yield>::return_type>,
+ "Return callback must return void");
+ typedef typename CallableArgInfo<Yield>::args_tuple_type RetTupleType;
if (ArgBytes.getOutOfBandError())
return Return(Session, CallCtx, ArgBytes.release());
- ArgTuple Args;
- if (std::apply(bind_front(S.argumentDeserializer(), std::move(ArgBytes)),
- Args))
+ if (auto Args =
+ S.arguments().template deserialize<ArgTuple>(std::move(ArgBytes)))
std::apply(bind_front(std::forward<Handler>(H),
- detail::StructuredYield<RetType, Serializer>(
+ detail::StructuredYield<RetTupleType, Serializer>(
Session, CallCtx, Return, std::move(S))),
- std::move(Args));
+ std::move(*Args));
else
Return(Session, CallCtx,
WrapperFunctionBuffer::createOutOfBandError(
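Under the refactored traits, a handler passed to WrapperFunction::handle must return void and take the yield callback as its first parameter; the yield's own parameter list (a one-element tuple or empty) determines the serialized result. A sketch of a conforming handler shape (move_only_function is orc-rt's; everything else is local):

    // Handler for a wrapper function whose result is Expected<int32_t>:
    auto Handler = [](move_only_function<void(Expected<int32_t>)> Yield,
                      int32_t N) {
      Yield(N + 1); // the result flows out through the yield callback
    };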
diff --git a/orc-rt/unittests/CMakeLists.txt b/orc-rt/unittests/CMakeLists.txt
index f29fb1e..54c453d 100644
--- a/orc-rt/unittests/CMakeLists.txt
+++ b/orc-rt/unittests/CMakeLists.txt
@@ -14,6 +14,7 @@ endfunction()
add_orc_rt_unittest(CoreTests
AllocActionTest.cpp
BitmaskEnumTest.cpp
+ CallableTraitsHelperTest.cpp
CommonTestUtils.cpp
ErrorTest.cpp
ExecutorAddressTest.cpp
diff --git a/orc-rt/unittests/CallableTraitsHelperTest.cpp b/orc-rt/unittests/CallableTraitsHelperTest.cpp
new file mode 100644
index 0000000..1db3916
--- /dev/null
+++ b/orc-rt/unittests/CallableTraitsHelperTest.cpp
@@ -0,0 +1,69 @@
+//===- CallableTraitsHelperTest.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Tests for orc-rt's CallableTraitsHelper.h APIs.
+//
+// NOTE: All tests in this file exercise compile-time functionality, so at
+// run time they all end up as no-ops. That's fine -- no-op test bodies
+// are cheap.
+//===----------------------------------------------------------------------===//
+
+#include "orc-rt/CallableTraitsHelper.h"
+#include "gtest/gtest.h"
+
+using namespace orc_rt;
+
+static void freeVoidVoid() {}
+
+TEST(CallableTraitsHelperTest, FreeVoidVoid) {
+ (void)freeVoidVoid;
+ typedef CallableArgInfo<decltype(freeVoidVoid)> CAI;
+ static_assert(std::is_void_v<CAI::return_type>);
+ static_assert(std::is_same_v<CAI::args_tuple_type, std::tuple<>>);
+}
+
+static int freeBinaryOp(int, float) { return 0; }
+
+TEST(CallableTraitsHelperTest, FreeBinaryOp) {
+ (void)freeBinaryOp;
+ typedef CallableArgInfo<decltype(freeBinaryOp)> CAI;
+ static_assert(std::is_same_v<CAI::return_type, int>);
+ static_assert(std::is_same_v<CAI::args_tuple_type, std::tuple<int, float>>);
+}
+
+TEST(CallableTraitsHelperTest, VoidVoidObj) {
+ auto VoidVoid = []() {};
+ typedef CallableArgInfo<decltype(VoidVoid)> CAI;
+ static_assert(std::is_void_v<CAI::return_type>);
+ static_assert(std::is_same_v<CAI::args_tuple_type, std::tuple<>>);
+}
+
+TEST(CallableTraitsHelperTest, BinaryOpObj) {
+ auto BinaryOp = [](int X, float Y) -> int { return X + Y; };
+ typedef CallableArgInfo<decltype(BinaryOp)> CAI;
+ static_assert(std::is_same_v<CAI::return_type, int>);
+ static_assert(std::is_same_v<CAI::args_tuple_type, std::tuple<int, float>>);
+}
+
+TEST(CallableTraitsHelperTest, PreservesLValueRef) {
+ auto RefOp = [](int &) {};
+ typedef CallableArgInfo<decltype(RefOp)> CAI;
+ static_assert(std::is_same_v<CAI::args_tuple_type, std::tuple<int &>>);
+}
+
+TEST(CallableTraitsHelperTest, PreservesLValueRefConstness) {
+ auto RefOp = [](const int &) {};
+ typedef CallableArgInfo<decltype(RefOp)> CAI;
+ static_assert(std::is_same_v<CAI::args_tuple_type, std::tuple<const int &>>);
+}
+
+TEST(CallableTraitsHelperTest, PreservesRValueRef) {
+ auto RefOp = [](int &&) {};
+ typedef CallableArgInfo<decltype(RefOp)> CAI;
+ static_assert(std::is_same_v<CAI::args_tuple_type, std::tuple<int &&>>);
+}
diff --git a/orc-rt/unittests/ErrorTest.cpp b/orc-rt/unittests/ErrorTest.cpp
index 3fd8279..260b6afc 100644
--- a/orc-rt/unittests/ErrorTest.cpp
+++ b/orc-rt/unittests/ErrorTest.cpp
@@ -386,6 +386,54 @@ TEST(ErrorTest, ExpectedCovariance) {
(void)!!A2;
}
+// Test that Expected<Error> works as expected.
+TEST(ErrorTest, ExpectedError) {
+ {
+ // Test success-success case.
+ Expected<Error> E(Error::success(), ForceExpectedSuccessValue());
+ EXPECT_TRUE(!!E);
+ cantFail(E.takeError());
+ auto Err = std::move(*E);
+ EXPECT_FALSE(!!Err);
+ }
+
+ {
+ // Test "failure" success case.
+ Expected<Error> E(make_error<StringError>("foo"),
+ ForceExpectedSuccessValue());
+ EXPECT_TRUE(!!E);
+ cantFail(E.takeError());
+ auto Err = std::move(*E);
+ EXPECT_TRUE(!!Err);
+ EXPECT_EQ(toString(std::move(Err)), "foo");
+ }
+}
+
+// Test that Expected<Expected<T>> works as expected.
+TEST(ErrorTest, ExpectedExpected) {
+ {
+ // Test success-success case.
+ Expected<Expected<int>> E(Expected<int>(42), ForceExpectedSuccessValue());
+ EXPECT_TRUE(!!E);
+ cantFail(E.takeError());
+ auto EI = std::move(*E);
+ EXPECT_TRUE(!!EI);
+ cantFail(EI.takeError());
+ EXPECT_EQ(*EI, 42);
+ }
+
+ {
+ // Test "failure" success case.
+ Expected<Expected<int>> E(Expected<int>(make_error<StringError>("foo")),
+ ForceExpectedSuccessValue());
+ EXPECT_TRUE(!!E);
+ cantFail(E.takeError());
+ auto EI = std::move(*E);
+ EXPECT_FALSE(!!EI);
+ EXPECT_EQ(toString(EI.takeError()), "foo");
+ }
+}
+
// Test that the ExitOnError utility works as expected.
TEST(ErrorTest, CantFailSuccess) {
cantFail(Error::success());
diff --git a/orc-rt/unittests/SPSWrapperFunctionTest.cpp b/orc-rt/unittests/SPSWrapperFunctionTest.cpp
index 0b65515..c0c86ff 100644
--- a/orc-rt/unittests/SPSWrapperFunctionTest.cpp
+++ b/orc-rt/unittests/SPSWrapperFunctionTest.cpp
@@ -144,3 +144,77 @@ TEST(SPSWrapperFunctionUtilsTest, TestBinaryOpViaFunctionPointer) {
[&](Expected<int32_t> R) { Result = cantFail(std::move(R)); }, 41, 1);
EXPECT_EQ(Result, 42);
}
+
+static void improbable_feat_sps_wrapper(orc_rt_SessionRef Session,
+ void *CallCtx,
+ orc_rt_WrapperFunctionReturn Return,
+ orc_rt_WrapperFunctionBuffer ArgBytes) {
+ SPSWrapperFunction<SPSError(bool)>::handle(
+ Session, CallCtx, Return, ArgBytes,
+ [](move_only_function<void(Error)> Return, bool LuckyHat) {
+ if (LuckyHat)
+ Return(Error::success());
+ else
+ Return(make_error<StringError>("crushed by boulder"));
+ });
+}
+
+TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningErrorSuccessCase) {
+ bool DidRun = false;
+ SPSWrapperFunction<SPSError(bool)>::call(
+ DirectCaller(nullptr, improbable_feat_sps_wrapper),
+ [&](Expected<Error> E) {
+ DidRun = true;
+ cantFail(cantFail(std::move(E)));
+ },
+ true);
+
+ EXPECT_TRUE(DidRun);
+}
+
+TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningErrorFailureCase) {
+ std::string ErrMsg;
+ SPSWrapperFunction<SPSError(bool)>::call(
+ DirectCaller(nullptr, improbable_feat_sps_wrapper),
+ [&](Expected<Error> E) { ErrMsg = toString(cantFail(std::move(E))); },
+ false);
+
+ EXPECT_EQ(ErrMsg, "crushed by boulder");
+}
+
+static void halve_number_sps_wrapper(orc_rt_SessionRef Session, void *CallCtx,
+ orc_rt_WrapperFunctionReturn Return,
+ orc_rt_WrapperFunctionBuffer ArgBytes) {
+ SPSWrapperFunction<SPSExpected<int32_t>(int32_t)>::handle(
+ Session, CallCtx, Return, ArgBytes,
+ [](move_only_function<void(Expected<int32_t>)> Return, int N) {
+ if (N % 2 == 0)
+ Return(N >> 1);
+ else
+ Return(make_error<StringError>("N is not a multiple of 2"));
+ });
+}
+
+TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningExpectedSuccessCase) {
+ int32_t Result = 0;
+ SPSWrapperFunction<SPSExpected<int32_t>(int32_t)>::call(
+ DirectCaller(nullptr, halve_number_sps_wrapper),
+ [&](Expected<Expected<int32_t>> R) {
+ Result = cantFail(cantFail(std::move(R)));
+ },
+ 2);
+
+ EXPECT_EQ(Result, 1);
+}
+
+TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningExpectedFailureCase) {
+ std::string ErrMsg;
+ SPSWrapperFunction<SPSExpected<int32_t>(int32_t)>::call(
+ DirectCaller(nullptr, halve_number_sps_wrapper),
+ [&](Expected<Expected<int32_t>> R) {
+ ErrMsg = toString(cantFail(std::move(R)).takeError());
+ },
+ 3);
+
+ EXPECT_EQ(ErrMsg, "N is not a multiple of 2");
+}
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 8d9e803..026664b 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -2674,6 +2674,22 @@ libc_support_library(
)
libc_support_library(
+ name = "__support_math_exp10m1f16",
+ hdrs = ["src/__support/math/exp10m1f16.h"],
+ deps = [
+ ":__support_fputil_except_value_utils",
+ ":__support_fputil_fenv_impl",
+ ":__support_fputil_fp_bits",
+ ":__support_fputil_multiply_add",
+ ":__support_fputil_polyeval",
+ ":__support_fputil_rounding_mode",
+ ":__support_macros_optimization",
+ ":__support_math_exp10f16_utils",
+ ":errno",
+ ],
+)
+
+libc_support_library(
name = "__support_math_erff",
hdrs = ["src/__support/math/erff.h"],
deps = [
@@ -3622,7 +3638,7 @@ libc_math_function(
libc_math_function(
name = "exp10m1f16",
additional_deps = [
- ":__support_math_exp10f16_utils",
+ ":__support_math_exp10m1f16",
],
)
@@ -5320,6 +5336,7 @@ libc_support_library(
":__support_common",
":__support_cpp_bitset",
":__support_cpp_type_traits",
+ ":__support_macros_attributes",
":__support_macros_optimization",
":hdr_limits_macros",
":llvm_libc_types_size_t",
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index 7e36d0b..4064c2f 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -284,6 +284,7 @@ cc_library(
"//mlir:InferTypeOpInterface",
"//mlir:LLVMToLLVMIRTranslation",
"//mlir:Pass",
+ "//mlir:SCFDialect",
"//mlir:ToLLVMIRTranslation",
"//mlir:TransformUtils",
"//mlir:Transforms",